From d250ab3f1c9ed29a530360899445f2d8714fc157 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 15:26:01 -0500 Subject: [PATCH 01/80] feat: [google-cloud-advisorynotifications] adding project level methods to advisorynotifications.googleapis.com (#12096) - [ ] Regenerate this pull request now. BEGIN_COMMIT_OVERRIDE feat: adding project level methods to advisorynotifications.googleapis.com docs: adding docs for new project level methods feat: adding GetNotification and ListNotifications methods for notifications parented at the project level END_COMMIT_OVERRIDE PiperOrigin-RevId: 588795150 Source-Link: https://github.com/googleapis/googleapis/commit/967b05bf1cc26a70e1ddaa4594058ae0d8fa8885 Source-Link: https://github.com/googleapis/googleapis-gen/commit/bdd6f86b871b17dfd410ec3a21761f3f64d82277 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLWFkdmlzb3J5bm90aWZpY2F0aW9ucy8uT3dsQm90LnlhbWwiLCJoIjoiYmRkNmY4NmI4NzFiMTdkZmQ0MTBlYzNhMjE3NjFmM2Y2NGQ4MjI3NyJ9 --------- Co-authored-by: Owl Bot --- .../advisory_notifications_service/async_client.py | 8 ++++++-- .../services/advisory_notifications_service/client.py | 8 ++++++-- .../advisory_notifications_service/transports/rest.py | 8 ++++++++ .../cloud/advisorynotifications_v1/types/service.py | 11 ++++++++--- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/async_client.py b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/async_client.py index 4e6dc1692cc6..47b8c7eab812 100644 --- a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/async_client.py +++ b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/async_client.py @@ -279,7 +279,9 @@ async def sample_list_notifications(): Required. The parent, which owns this collection of notifications. Must be of the form - "organizations/{organization}/locations/{location}". + "organizations/{organization}/locations/{location}" + or + "projects/{project}/locations/{location}" This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -403,7 +405,9 @@ async def sample_get_notification(): Required. A name of the notification to retrieve. Format: - organizations/{organization}/locations/{location}/notifications/{notification}. + organizations/{organization}/locations/{location}/notifications/{notification} + or + projects/{projects}/locations/{location}/notifications/{notification}. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/client.py b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/client.py index 206cc2793c33..342e341d8929 100644 --- a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/client.py +++ b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/client.py @@ -517,7 +517,9 @@ def sample_list_notifications(): Required. 
The parent, which owns this collection of notifications. Must be of the form - "organizations/{organization}/locations/{location}". + "organizations/{organization}/locations/{location}" + or + "projects/{project}/locations/{location}" This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -632,7 +634,9 @@ def sample_get_notification(): Required. A name of the notification to retrieve. Format: - organizations/{organization}/locations/{location}/notifications/{notification}. + organizations/{organization}/locations/{location}/notifications/{notification} + or + projects/{projects}/locations/{location}/notifications/{notification}. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/transports/rest.py b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/transports/rest.py index 5e6bca71b7e6..3fa6d6544ea8 100644 --- a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/transports/rest.py +++ b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/services/advisory_notifications_service/transports/rest.py @@ -330,6 +330,10 @@ def __call__( "method": "get", "uri": "/v1/{name=organizations/*/locations/*/notifications/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notifications/*}", + }, ] request, metadata = self._interceptor.pre_get_notification( request, metadata @@ -507,6 +511,10 @@ def __call__( "method": "get", "uri": "/v1/{parent=organizations/*/locations/*}/notifications", }, + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/notifications", + }, ] request, metadata = self._interceptor.pre_list_notifications( request, metadata diff --git a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/types/service.py b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/types/service.py index 3ff314dd441b..296543a55cfb 100644 --- a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/types/service.py +++ b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/types/service.py @@ -123,7 +123,9 @@ class Notification(proto.Message): The resource name of the notification. Format: - organizations/{organization}/locations/{location}/notifications/{notification}. + organizations/{organization}/locations/{location}/notifications/{notification} + or + projects/{project}/locations/{location}/notifications/{notification}. subject (google.cloud.advisorynotifications_v1.types.Subject): The subject line of the notification. messages (MutableSequence[google.cloud.advisorynotifications_v1.types.Message]): @@ -327,7 +329,8 @@ class ListNotificationsRequest(proto.Message): parent (str): Required. The parent, which owns this collection of notifications. Must be of the form - "organizations/{organization}/locations/{location}". + "organizations/{organization}/locations/{location}" + or "projects/{project}/locations/{location}". page_size (int): The maximum number of notifications to return. The service may return fewer than this @@ -418,7 +421,9 @@ class GetNotificationRequest(proto.Message): Required. A name of the notification to retrieve. 
Format: - organizations/{organization}/locations/{location}/notifications/{notification}. + organizations/{organization}/locations/{location}/notifications/{notification} + or + projects/{projects}/locations/{location}/notifications/{notification}. language_code (str): ISO code for requested localization language. If unset, will be interpereted as "en". If the From a74938fa2ed19348d703d23ffb13545423e8b736 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 16:24:56 -0500 Subject: [PATCH 02/80] docs: [google-maps-places] change comments for some fields in Places API (#12101) - [ ] Regenerate this pull request now. BEGIN_COMMIT_OVERRIDE docs: Change comments for some fields in Places API feat: Add new wheelchair accessibility fields feat: Add new primary type fields feat: Add new short formatted address field END_COMMIT_OVERRIDE PiperOrigin-RevId: 588852313 Source-Link: https://github.com/googleapis/googleapis/commit/fe383674a9f63842ea07cd7671ca9020add41e98 Source-Link: https://github.com/googleapis/googleapis-gen/commit/d1a3cba834ead4c8a93644dd84ff0d2b0fc57b25 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLW1hcHMtcGxhY2VzLy5Pd2xCb3QueWFtbCIsImgiOiJkMWEzY2JhODM0ZWFkNGM4YTkzNjQ0ZGQ4NGZmMGQyYjBmYzU3YjI1In0= --------- Co-authored-by: Owl Bot --- .../google/maps/places/gapic_version.py | 2 +- .../google/maps/places_v1/gapic_version.py | 2 +- .../places_v1/services/places/async_client.py | 20 +- .../maps/places_v1/services/places/client.py | 20 +- .../services/places/transports/grpc.py | 4 +- .../places/transports/grpc_asyncio.py | 4 +- .../services/places/transports/rest.py | 2 +- .../maps/places_v1/types/attribution.py | 6 +- .../google/maps/places_v1/types/photo.py | 15 +- .../google/maps/places_v1/types/place.py | 441 ++++++++++-------- .../maps/places_v1/types/places_service.py | 144 +++--- .../google/maps/places_v1/types/review.py | 28 +- ...nippet_metadata_google.maps.places.v1.json | 2 +- .../tests/unit/gapic/places_v1/test_places.py | 12 + 14 files changed, 401 insertions(+), 301 deletions(-) diff --git a/packages/google-maps-places/google/maps/places/gapic_version.py b/packages/google-maps-places/google/maps/places/gapic_version.py index ae12f4dfd596..360a0d13ebdd 100644 --- a/packages/google-maps-places/google/maps/places/gapic_version.py +++ b/packages/google-maps-places/google/maps/places/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.1.5" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-maps-places/google/maps/places_v1/gapic_version.py b/packages/google-maps-places/google/maps/places_v1/gapic_version.py index ae12f4dfd596..360a0d13ebdd 100644 --- a/packages/google-maps-places/google/maps/places_v1/gapic_version.py +++ b/packages/google-maps-places/google/maps/places_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.1.5" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-maps-places/google/maps/places_v1/services/places/async_client.py b/packages/google-maps-places/google/maps/places_v1/services/places/async_client.py index b5b8ae1fe65f..a66be5af3576 100644 --- a/packages/google-maps-places/google/maps/places_v1/services/places/async_client.py +++ b/packages/google-maps-places/google/maps/places_v1/services/places/async_client.py @@ -62,7 +62,7 @@ class PlacesAsyncClient: """Service definition for the Places API. Note: every request actually - requires a field mask set outside of the request proto (all/'*' is + requires a field mask set outside of the request proto (all/'*', is not assumed). That can be set via either a side channel (SystemParameterContext) over RPC, or a header (X-Goog-FieldMask) over HTTP. See: https://cloud.google.com/apis/docs/system-parameters @@ -412,9 +412,15 @@ async def sample_get_photo_media(): The request object. Request for fetching a photo of a place using a photo resource name. name (:class:`str`): - Required. The resource name of a photo. It is returned - in Place's photos.name field. Format: - places//photos//media. + Required. The resource name of a photo media in the + format: + ``places/{place_id}/photos/{photo_reference}/media``. + + The resource name of a photo as returned in a Place + object's ``photos.name`` field comes with the format + ``places/{place_id}/photos/{photo_reference}``. You need + to append ``/media`` at the end of the photo resource to + get the photo media resource name. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -480,7 +486,7 @@ async def get_place( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> place.Place: - r"""Get a Place with a place id (in a name) string. + r"""Get place details with a place id (in a name) string. .. code-block:: python @@ -513,9 +519,9 @@ async def sample_get_place(): The request object. Request for fetching a Place with a place id (in a name) string. name (:class:`str`): - Required. A place_id returned in a Place (with "places/" + Required. A place ID returned in a Place (with "places/" prefix), or equivalently the name in the same Place. - Format: places/. + Format: ``places/{place_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-maps-places/google/maps/places_v1/services/places/client.py b/packages/google-maps-places/google/maps/places_v1/services/places/client.py index eb8f13584a99..09ae45dab4b3 100644 --- a/packages/google-maps-places/google/maps/places_v1/services/places/client.py +++ b/packages/google-maps-places/google/maps/places_v1/services/places/client.py @@ -102,7 +102,7 @@ def get_transport_class( class PlacesClient(metaclass=PlacesClientMeta): """Service definition for the Places API. Note: every request actually - requires a field mask set outside of the request proto (all/'*' is + requires a field mask set outside of the request proto (all/'*', is not assumed). That can be set via either a side channel (SystemParameterContext) over RPC, or a header (X-Goog-FieldMask) over HTTP. See: https://cloud.google.com/apis/docs/system-parameters @@ -691,9 +691,15 @@ def sample_get_photo_media(): The request object. Request for fetching a photo of a place using a photo resource name. name (str): - Required. The resource name of a photo. 
It is returned - in Place's photos.name field. Format: - places//photos//media. + Required. The resource name of a photo media in the + format: + ``places/{place_id}/photos/{photo_reference}/media``. + + The resource name of a photo as returned in a Place + object's ``photos.name`` field comes with the format + ``places/{place_id}/photos/{photo_reference}``. You need + to append ``/media`` at the end of the photo resource to + get the photo media resource name. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -759,7 +765,7 @@ def get_place( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> place.Place: - r"""Get a Place with a place id (in a name) string. + r"""Get place details with a place id (in a name) string. .. code-block:: python @@ -792,9 +798,9 @@ def sample_get_place(): The request object. Request for fetching a Place with a place id (in a name) string. name (str): - Required. A place_id returned in a Place (with "places/" + Required. A place ID returned in a Place (with "places/" prefix), or equivalently the name in the same Place. - Format: places/. + Format: ``places/{place_id}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc.py b/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc.py index dc682544c43b..6b8dc0ff836a 100644 --- a/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc.py +++ b/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc.py @@ -31,7 +31,7 @@ class PlacesGrpcTransport(PlacesTransport): """gRPC backend transport for Places. Service definition for the Places API. Note: every request actually - requires a field mask set outside of the request proto (all/'*' is + requires a field mask set outside of the request proto (all/'*', is not assumed). That can be set via either a side channel (SystemParameterContext) over RPC, or a header (X-Goog-FieldMask) over HTTP. See: https://cloud.google.com/apis/docs/system-parameters @@ -318,7 +318,7 @@ def get_photo_media( def get_place(self) -> Callable[[places_service.GetPlaceRequest], place.Place]: r"""Return a callable for the get place method over gRPC. - Get a Place with a place id (in a name) string. + Get place details with a place id (in a name) string. Returns: Callable[[~.GetPlaceRequest], diff --git a/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc_asyncio.py b/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc_asyncio.py index 01c02171ec1e..c973a4e8bd05 100644 --- a/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc_asyncio.py +++ b/packages/google-maps-places/google/maps/places_v1/services/places/transports/grpc_asyncio.py @@ -32,7 +32,7 @@ class PlacesGrpcAsyncIOTransport(PlacesTransport): """gRPC AsyncIO backend transport for Places. Service definition for the Places API. Note: every request actually - requires a field mask set outside of the request proto (all/'*' is + requires a field mask set outside of the request proto (all/'*', is not assumed). That can be set via either a side channel (SystemParameterContext) over RPC, or a header (X-Goog-FieldMask) over HTTP. 
See: https://cloud.google.com/apis/docs/system-parameters @@ -326,7 +326,7 @@ def get_place( ) -> Callable[[places_service.GetPlaceRequest], Awaitable[place.Place]]: r"""Return a callable for the get place method over gRPC. - Get a Place with a place id (in a name) string. + Get place details with a place id (in a name) string. Returns: Callable[[~.GetPlaceRequest], diff --git a/packages/google-maps-places/google/maps/places_v1/services/places/transports/rest.py b/packages/google-maps-places/google/maps/places_v1/services/places/transports/rest.py index 6d4ac27db97f..d37f84946392 100644 --- a/packages/google-maps-places/google/maps/places_v1/services/places/transports/rest.py +++ b/packages/google-maps-places/google/maps/places_v1/services/places/transports/rest.py @@ -203,7 +203,7 @@ class PlacesRestTransport(PlacesTransport): """REST backend transport for Places. Service definition for the Places API. Note: every request actually - requires a field mask set outside of the request proto (all/'*' is + requires a field mask set outside of the request proto (all/'*', is not assumed). That can be set via either a side channel (SystemParameterContext) over RPC, or a header (X-Goog-FieldMask) over HTTP. See: https://cloud.google.com/apis/docs/system-parameters diff --git a/packages/google-maps-places/google/maps/places_v1/types/attribution.py b/packages/google-maps-places/google/maps/places_v1/types/attribution.py index 6b56f07c539b..1ee0160fe4f0 100644 --- a/packages/google-maps-places/google/maps/places_v1/types/attribution.py +++ b/packages/google-maps-places/google/maps/places_v1/types/attribution.py @@ -34,15 +34,15 @@ class AuthorAttribution(proto.Message): Attributes: display_name (str): - Output only. Name of the author of the + Name of the author of the [Photo][google.maps.places.v1.Photo] or [Review][google.maps.places.v1.Review]. uri (str): - Output only. URI of the author of the + URI of the author of the [Photo][google.maps.places.v1.Photo] or [Review][google.maps.places.v1.Review]. photo_uri (str): - Output only. Profile photo URI of the author of the + Profile photo URI of the author of the [Photo][google.maps.places.v1.Photo] or [Review][google.maps.places.v1.Review]. """ diff --git a/packages/google-maps-places/google/maps/places_v1/types/photo.py b/packages/google-maps-places/google/maps/places_v1/types/photo.py index 34e23e4a44e9..ea9aa9c2c6de 100644 --- a/packages/google-maps-places/google/maps/places_v1/types/photo.py +++ b/packages/google-maps-places/google/maps/places_v1/types/photo.py @@ -34,17 +34,16 @@ class Photo(proto.Message): Attributes: name (str): - Output only. A reference representing this place photo which - may be used to look up this place photo again (a.k.a. the - API "resource" name: places/{place_id}/photos/{photo}). + Identifier. A reference representing this place photo which + may be used to look up this place photo again (also called + the API "resource" name: + ``places/{place_id}/photos/{photo}``). width_px (int): - Output only. The maximum available width, in - pixels. + The maximum available width, in pixels. height_px (int): - Output only. The maximum available height, in - pixels. + The maximum available height, in pixels. author_attributions (MutableSequence[google.maps.places_v1.types.AuthorAttribution]): - Output only. This photo's authors. + This photo's authors. 
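The Photo docstring above ties together with the GetPhotoMedia changes earlier in this commit: a photo's resource name has the form places/{place_id}/photos/{photo_reference}, and appending /media yields the photo media resource name. A minimal sketch of that flow with the regenerated client follows; the place ID and the x-goog-fieldmask value are illustrative placeholders, not something prescribed by this patch.

from google.maps import places_v1

client = places_v1.PlacesClient()

# Every Places request needs a field mask supplied outside the request proto,
# e.g. via the x-goog-fieldmask header (see the service docstring above).
place = client.get_place(
    request=places_v1.GetPlaceRequest(name="places/PLACE_ID"),  # hypothetical place ID
    metadata=[("x-goog-fieldmask", "id,displayName,photos")],  # illustrative mask
)

if place.photos:
    # photos[0].name is places/{place_id}/photos/{photo_reference};
    # append /media to get the photo media resource name.
    media = client.get_photo_media(
        request=places_v1.GetPhotoMediaRequest(
            name=f"{place.photos[0].name}/media",
            max_width_px=400,
        )
    )
    print(media.photo_uri)  # short-lived URI that renders the photo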
""" name: str = proto.Field( diff --git a/packages/google-maps-places/google/maps/places_v1/types/place.py b/packages/google-maps-places/google/maps/places_v1/types/place.py index b5cec0c4975f..c85b3a3bd271 100644 --- a/packages/google-maps-places/google/maps/places_v1/types/place.py +++ b/packages/google-maps-places/google/maps/places_v1/types/place.py @@ -68,249 +68,270 @@ class Place(proto.Message): Attributes: name (str): - Output only. An ID representing this place which may be used - to look up this place again (a.k.a. the API "resource" name: - places/). + An ID representing this place which may be used to look up + this place again (a.k.a. the API "resource" name: + places/place_id). id (str): - Output only. The unique identifier of a - place. + The unique identifier of a place. display_name (google.type.localized_text_pb2.LocalizedText): - Output only. The localized name of the place, - suitable as a short human-readable description. - For example, "Google Sydney", "Starbucks", - "Pyrmont", etc. + The localized name of the place, suitable as + a short human-readable description. For example, + "Google Sydney", "Starbucks", "Pyrmont", etc. types (MutableSequence[str]): - Output only. A set of type tags for this - result. For example, "political" and "locality". + A set of type tags for this result. For + example, "political" and "locality". For the + complete list of possible values, see Table A + and Table B at + https://developers.google.com/maps/documentation/places/web-service/place-types + primary_type (str): + The primary type of the given result. This + type must one of the Places API supported types. + For example, "restaurant", "cafe", "airport", + etc. A place can only have a single primary + type. For the complete list of possible values, + see Table A and Table B at + https://developers.google.com/maps/documentation/places/web-service/place-types + primary_type_display_name (google.type.localized_text_pb2.LocalizedText): + The display name of the primary type, + localized to the request language if applicable. + For the complete list of possible values, see + Table A and Table B at + https://developers.google.com/maps/documentation/places/web-service/place-types national_phone_number (str): - Output only. A human-readable phone number - for the place, in national format. + A human-readable phone number for the place, + in national format. international_phone_number (str): - Output only. A human-readable phone number - for the place, in international format. + A human-readable phone number for the place, + in international format. formatted_address (str): - Output only. A full, human-readable address - for this place. + A full, human-readable address for this + place. + short_formatted_address (str): + A short, human-readable address for this + place. address_components (MutableSequence[google.maps.places_v1.types.Place.AddressComponent]): - Output only. Repeated components for each - locality level. + Repeated components for each locality level. Note the + following facts about the address_components[] array: + + - The array of address components may contain more + components than the formatted_address. + - The array does not necessarily include all the political + entities that contain an address, apart from those + included in the formatted_address. To retrieve all the + political entities that contain a specific address, you + should use reverse geocoding, passing the + latitude/longitude of the address as a parameter to the + request. 
+ - The format of the response is not guaranteed to remain + the same between requests. In particular, the number of + address_components varies based on the address requested + and can change over time for the same address. A + component can change position in the array. The type of + the component can change. A particular component may be + missing in a later response. plus_code (google.maps.places_v1.types.Place.PlusCode): - Output only. Plus code of the place location - lat/long. + Plus code of the place location lat/long. location (google.type.latlng_pb2.LatLng): - Output only. The position of this place. + The position of this place. viewport (google.geo.type.types.Viewport): - Output only. A viewport suitable for - displaying the place on an average-sized map. + A viewport suitable for displaying the place + on an average-sized map. rating (float): - Output only. A rating between 1.0 and 5.0, - based on user reviews of this place. + A rating between 1.0 and 5.0, based on user + reviews of this place. google_maps_uri (str): - Output only. A URL providing more information - about this place. + A URL providing more information about this + place. website_uri (str): - Output only. The authoritative website for - this place, e.g. a business' homepage. Note that - for places that are part of a chain (e.g. an - IKEA store), this will usually be the website - for the individual store, not the overall chain. + The authoritative website for this place, + e.g. a business' homepage. Note that for places + that are part of a chain (e.g. an IKEA store), + this will usually be the website for the + individual store, not the overall chain. reviews (MutableSequence[google.maps.places_v1.types.Review]): - Output only. List of reviews about this - place. + List of reviews about this place, sorted by + relevance. regular_opening_hours (google.maps.places_v1.types.Place.OpeningHours): - Output only. The regular hours of operation. + The regular hours of operation. utc_offset_minutes (int): - Output only. Number of minutes this place's - timezone is currently offset from UTC. This is - expressed in minutes to support timezones that - are offset by fractions of an hour, e.g. X hours - and 15 minutes. + Number of minutes this place's timezone is + currently offset from UTC. This is expressed in + minutes to support timezones that are offset by + fractions of an hour, e.g. X hours and 15 + minutes. This field is a member of `oneof`_ ``_utc_offset_minutes``. photos (MutableSequence[google.maps.places_v1.types.Photo]): - Output only. Information (including - references) about photos of this place. + Information (including references) about + photos of this place. adr_format_address (str): - Output only. The place's address in adr - microformat: http://microformats.org/wiki/adr. + The place's address in adr microformat: + http://microformats.org/wiki/adr. business_status (google.maps.places_v1.types.Place.BusinessStatus): - Output only. The business status for the - place. + The business status for the place. price_level (google.maps.places_v1.types.PriceLevel): - Output only. Price level of the place. + Price level of the place. attributions (MutableSequence[google.maps.places_v1.types.Place.Attribution]): - Output only. A set of data provider that must - be shown with this result. + A set of data provider that must be shown + with this result. user_rating_count (int): - Output only. The total number of reviews - (with or without text) for this place. 
+ The total number of reviews (with or without + text) for this place. This field is a member of `oneof`_ ``_user_rating_count``. icon_mask_base_uri (str): - Output only. A truncated URL to an v2 icon - mask. User can access different icon type by - appending type suffix to the end (eg, ".svg" or - ".png"). + A truncated URL to an icon mask. User can + access different icon type by appending type + suffix to the end (eg, ".svg" or ".png"). icon_background_color (str): - Output only. Background color for icon_mask in hex format, - e.g. #909CE1. + Background color for icon_mask in hex format, e.g. #909CE1. takeout (bool): - Output only. Specifies if the business - supports takeout. + Specifies if the business supports takeout. This field is a member of `oneof`_ ``_takeout``. delivery (bool): - Output only. Specifies if the business - supports delivery. + Specifies if the business supports delivery. This field is a member of `oneof`_ ``_delivery``. dine_in (bool): - Output only. Specifies if the business - supports indoor or outdoor seating options. + Specifies if the business supports indoor or + outdoor seating options. This field is a member of `oneof`_ ``_dine_in``. curbside_pickup (bool): - Output only. Specifies if the business - supports curbside pickup. + Specifies if the business supports curbside + pickup. This field is a member of `oneof`_ ``_curbside_pickup``. reservable (bool): - Output only. Specifies if the place supports - reservations. + Specifies if the place supports reservations. This field is a member of `oneof`_ ``_reservable``. serves_breakfast (bool): - Output only. Specifies if the place serves - breakfast. + Specifies if the place serves breakfast. This field is a member of `oneof`_ ``_serves_breakfast``. serves_lunch (bool): - Output only. Specifies if the place serves - lunch. + Specifies if the place serves lunch. This field is a member of `oneof`_ ``_serves_lunch``. serves_dinner (bool): - Output only. Specifies if the place serves - dinner. + Specifies if the place serves dinner. This field is a member of `oneof`_ ``_serves_dinner``. serves_beer (bool): - Output only. Specifies if the place serves - beer. + Specifies if the place serves beer. This field is a member of `oneof`_ ``_serves_beer``. serves_wine (bool): - Output only. Specifies if the place serves - wine. + Specifies if the place serves wine. This field is a member of `oneof`_ ``_serves_wine``. serves_brunch (bool): - Output only. Specifies if the place serves - brunch. + Specifies if the place serves brunch. This field is a member of `oneof`_ ``_serves_brunch``. serves_vegetarian_food (bool): - Output only. Specifies if the place serves - vegetarian food. + Specifies if the place serves vegetarian + food. This field is a member of `oneof`_ ``_serves_vegetarian_food``. current_opening_hours (google.maps.places_v1.types.Place.OpeningHours): - Output only. The hours of operation for the next seven days - (including today). The time period starts at midnight on the - date of the request and ends at 11:59 pm six days later. - This field includes the special_days subfield of all hours, - set for dates that have exceptional hours. + The hours of operation for the next seven days (including + today). The time period starts at midnight on the date of + the request and ends at 11:59 pm six days later. This field + includes the special_days subfield of all hours, set for + dates that have exceptional hours. 
current_secondary_opening_hours (MutableSequence[google.maps.places_v1.types.Place.OpeningHours]): - Output only. Contains an array of entries for the next seven - days including information about secondary hours of a - business. Secondary hours are different from a business's - main hours. For example, a restaurant can specify drive - through hours or delivery hours as its secondary hours. This - field populates the type subfield, which draws from a - predefined list of opening hours types (such as - DRIVE_THROUGH, PICKUP, or TAKEOUT) based on the types of the - place. This field includes the special_days subfield of all - hours, set for dates that have exceptional hours. + Contains an array of entries for the next seven days + including information about secondary hours of a business. + Secondary hours are different from a business's main hours. + For example, a restaurant can specify drive through hours or + delivery hours as its secondary hours. This field populates + the type subfield, which draws from a predefined list of + opening hours types (such as DRIVE_THROUGH, PICKUP, or + TAKEOUT) based on the types of the place. This field + includes the special_days subfield of all hours, set for + dates that have exceptional hours. regular_secondary_opening_hours (MutableSequence[google.maps.places_v1.types.Place.OpeningHours]): - Output only. Contains an array of entries for information - about regular secondary hours of a business. Secondary hours - are different from a business's main hours. For example, a - restaurant can specify drive through hours or delivery hours - as its secondary hours. This field populates the type - subfield, which draws from a predefined list of opening - hours types (such as DRIVE_THROUGH, PICKUP, or TAKEOUT) - based on the types of the place. + Contains an array of entries for information about regular + secondary hours of a business. Secondary hours are different + from a business's main hours. For example, a restaurant can + specify drive through hours or delivery hours as its + secondary hours. This field populates the type subfield, + which draws from a predefined list of opening hours types + (such as DRIVE_THROUGH, PICKUP, or TAKEOUT) based on the + types of the place. editorial_summary (google.type.localized_text_pb2.LocalizedText): - Output only. Contains a summary of the place. - A summary is comprised of a textual overview, - and also includes the language code for these if + Contains a summary of the place. A summary is + comprised of a textual overview, and also + includes the language code for these if applicable. Summary text must be presented as-is and can not be modified or altered. outdoor_seating (bool): - Output only. Place provides outdoor seating. + Place provides outdoor seating. This field is a member of `oneof`_ ``_outdoor_seating``. live_music (bool): - Output only. Place provides live music. + Place provides live music. This field is a member of `oneof`_ ``_live_music``. menu_for_children (bool): - Output only. Place has a children's menu. + Place has a children's menu. This field is a member of `oneof`_ ``_menu_for_children``. serves_cocktails (bool): - Output only. Place serves cocktails. + Place serves cocktails. This field is a member of `oneof`_ ``_serves_cocktails``. serves_dessert (bool): - Output only. Place serves dessert. + Place serves dessert. This field is a member of `oneof`_ ``_serves_dessert``. serves_coffee (bool): - Output only. Place serves coffee. + Place serves coffee. 
This field is a member of `oneof`_ ``_serves_coffee``. good_for_children (bool): - Output only. Place is good for children. + Place is good for children. This field is a member of `oneof`_ ``_good_for_children``. allows_dogs (bool): - Output only. Place allows dogs. + Place allows dogs. This field is a member of `oneof`_ ``_allows_dogs``. restroom (bool): - Output only. Place has restroom. + Place has restroom. This field is a member of `oneof`_ ``_restroom``. good_for_groups (bool): - Output only. Place accommodates groups. + Place accommodates groups. This field is a member of `oneof`_ ``_good_for_groups``. good_for_watching_sports (bool): - Output only. Place is suitable for watching - sports. + Place is suitable for watching sports. This field is a member of `oneof`_ ``_good_for_watching_sports``. payment_options (google.maps.places_v1.types.Place.PaymentOptions): - + Payment options the place accepts. If a + payment option data is not available, the + payment option field will be unset. parking_options (google.maps.places_v1.types.Place.ParkingOptions): - Output only. Options of parking provided by - the place. + Options of parking provided by the place. sub_destinations (MutableSequence[google.maps.places_v1.types.Place.SubDestination]): - Output only. A list of sub destinations - related to the place. + A list of sub destinations related to the + place. accessibility_options (google.maps.places_v1.types.Place.AccessibilityOptions): - Output only. Information about the - accessibility options a place offers. + Information about the accessibility options a + place offers. This field is a member of `oneof`_ ``_accessibility_options``. fuel_options (google.maps.places_v1.types.FuelOptions): - Output only. The most recent information - about fuel options in a gas station. This - information is updated regularly. + The most recent information about fuel + options in a gas station. This information is + updated regularly. ev_charge_options (google.maps.places_v1.types.EVChargeOptions): - Output only. Information of ev charging - options. + Information of ev charging options. """ class BusinessStatus(proto.Enum): @@ -338,19 +359,19 @@ class AddressComponent(proto.Message): Attributes: long_text (str): - Output only. The full text description or name of the - address component. For example, an address component for the - country Australia may have a long_name of "Australia". + The full text description or name of the address component. + For example, an address component for the country Australia + may have a long_name of "Australia". short_text (str): - Output only. An abbreviated textual name for the address - component, if available. For example, an address component - for the country of Australia may have a short_name of "AU". + An abbreviated textual name for the address component, if + available. For example, an address component for the country + of Australia may have a short_name of "AU". types (MutableSequence[str]): - Output only. An array indicating the type(s) - of the address component. + An array indicating the type(s) of the + address component. language_code (str): - Output only. The language used to format this - components, in CLDR notation. + The language used to format this components, + in CLDR notation. """ long_text: str = proto.Field( @@ -378,15 +399,14 @@ class PlusCode(proto.Message): Attributes: global_code (str): - Output only. Place's global (full) code, such - as "9FWM33GV+HQ", representing an 1/8000 by - 1/8000 degree area (~14 by 14 meters). 
+ Place's global (full) code, such as + "9FWM33GV+HQ", representing an 1/8000 by 1/8000 + degree area (~14 by 14 meters). compound_code (str): - Output only. Place's compound code, such as - "33GV+HQ, Ramberg, Norway", containing the - suffix of the global code and replacing the - prefix with a formatted name of a reference - entity. + Place's compound code, such as "33GV+HQ, + Ramberg, Norway", containing the suffix of the + global code and replacing the prefix with a + formatted name of a reference entity. """ global_code: str = proto.Field( @@ -405,37 +425,35 @@ class OpeningHours(proto.Message): Attributes: open_now (bool): - Output only. Is this place open right now? - Always present unless we lack time-of-day or - timezone data for these opening hours. + Is this place open right now? Always present + unless we lack time-of-day or timezone data for + these opening hours. This field is a member of `oneof`_ ``_open_now``. periods (MutableSequence[google.maps.places_v1.types.Place.OpeningHours.Period]): - Output only. The periods that this place is - open during the week. The periods are in - chronological order, starting with Sunday in the - place-local timezone. An empty (but not absent) - value indicates a place that is never open, e.g. + The periods that this place is open during + the week. The periods are in chronological + order, starting with Sunday in the place-local + timezone. An empty (but not absent) value + indicates a place that is never open, e.g. because it is closed temporarily for renovations. weekday_descriptions (MutableSequence[str]): - Output only. Localized strings describing the - opening hours of this place, one string for each - day of the week. Will be empty if the hours are + Localized strings describing the opening + hours of this place, one string for each day of + the week. Will be empty if the hours are unknown or could not be converted to localized - text. Example: "Sun: - - 18:00–06:00". + text. Example: "Sun: 18:00–06:00". secondary_hours_type (google.maps.places_v1.types.Place.OpeningHours.SecondaryHoursType): - Output only. A type string used to identify - the type of secondary hours. + A type string used to identify the type of + secondary hours. special_days (MutableSequence[google.maps.places_v1.types.Place.OpeningHours.SpecialDay]): - Output only. Structured information for special days that - fall within the period that the returned opening hours - cover. Special days are days that could impact the business - hours of a place, e.g. Christmas day. Set for - current_opening_hours and current_secondary_opening_hours if - there are exceptional hours. + Structured information for special days that fall within the + period that the returned opening hours cover. Special days + are days that could impact the business hours of a place, + e.g. Christmas day. Set for current_opening_hours and + current_secondary_opening_hours if there are exceptional + hours. """ class SecondaryHoursType(proto.Enum): @@ -493,11 +511,9 @@ class Period(proto.Message): Attributes: open_ (google.maps.places_v1.types.Place.OpeningHours.Period.Point): - Output only. The time that the place starts - to be open. + The time that the place starts to be open. close (google.maps.places_v1.types.Place.OpeningHours.Period.Point): - Output only. The time that the place starts - to be closed. + The time that the place starts to be closed. """ class Point(proto.Message): @@ -507,31 +523,26 @@ class Point(proto.Message): Attributes: day (int): - Output only. 
A day of the week, as an integer - in the range 0-6. 0 is Sunday, 1 is Monday, - etc. + A day of the week, as an integer in the range + 0-6. 0 is Sunday, 1 is Monday, etc. This field is a member of `oneof`_ ``_day``. hour (int): - Output only. The hour in 2 digits. Ranges - from 00 to 23. + The hour in 2 digits. Ranges from 00 to 23. This field is a member of `oneof`_ ``_hour``. minute (int): - Output only. The minute in 2 digits. Ranges - from 00 to 59. + The minute in 2 digits. Ranges from 00 to 59. This field is a member of `oneof`_ ``_minute``. date (google.type.date_pb2.Date): - Output only. Date in the local timezone for - the place. + Date in the local timezone for the place. truncated (bool): - Output only. Whether or not this endpoint was truncated. - Truncation occurs when the real hours are outside the times - we are willing to return hours between, so we truncate the - hours back to these boundaries. This ensures that at most 24 - \* 7 hours from midnight of the day of the request are - returned. + Whether or not this endpoint was truncated. Truncation + occurs when the real hours are outside the times we are + willing to return hours between, so we truncate the hours + back to these boundaries. This ensures that at most 24 \* 7 + hours from midnight of the day of the request are returned. """ day: int = proto.Field( @@ -578,7 +589,7 @@ class SpecialDay(proto.Message): Attributes: date (google.type.date_pb2.Date): - Output only. The date of this special day. + The date of this special day. """ date: date_pb2.Date = proto.Field( @@ -619,11 +630,9 @@ class Attribution(proto.Message): Attributes: provider (str): - Output only. Name of the Place's data - provider. + Name of the Place's data provider. provider_uri (str): - Output only. URI to the Place's data - provider. + URI to the Place's data provider. """ provider: str = proto.Field( @@ -783,17 +792,44 @@ class AccessibilityOptions(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + wheelchair_accessible_parking (bool): + Place offers wheelchair accessible parking. + + This field is a member of `oneof`_ ``_wheelchair_accessible_parking``. wheelchair_accessible_entrance (bool): Places has wheelchair accessible entrance. This field is a member of `oneof`_ ``_wheelchair_accessible_entrance``. + wheelchair_accessible_restroom (bool): + Place has wheelchair accessible restroom. + + This field is a member of `oneof`_ ``_wheelchair_accessible_restroom``. + wheelchair_accessible_seating (bool): + Place has wheelchair accessible seating. + + This field is a member of `oneof`_ ``_wheelchair_accessible_seating``. 
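The new wheelchair-accessibility booleans above are optional (oneof-wrapped) fields, so they are only meaningful when present and, like everything else on Place, only returned when named in the field mask. A small sketch of reading them is below; the field-mask string and place ID are illustrative assumptions, not mandated by this change.

from google.maps import places_v1

client = places_v1.PlacesClient()

# accessibility_options is only populated when requested via the field mask.
place = client.get_place(
    request=places_v1.GetPlaceRequest(name="places/PLACE_ID"),  # hypothetical place ID
    metadata=[("x-goog-fieldmask", "id,accessibilityOptions")],  # illustrative mask
)

opts = place.accessibility_options
# Each attribute is an optional bool; `in` checks proto field presence so an
# unset field is not mistaken for False.
for field in (
    "wheelchair_accessible_parking",
    "wheelchair_accessible_entrance",
    "wheelchair_accessible_restroom",
    "wheelchair_accessible_seating",
):
    if field in opts:
        print(field, getattr(opts, field))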
""" + wheelchair_accessible_parking: bool = proto.Field( + proto.BOOL, + number=1, + optional=True, + ) wheelchair_accessible_entrance: bool = proto.Field( proto.BOOL, number=2, optional=True, ) + wheelchair_accessible_restroom: bool = proto.Field( + proto.BOOL, + number=3, + optional=True, + ) + wheelchair_accessible_seating: bool = proto.Field( + proto.BOOL, + number=4, + optional=True, + ) name: str = proto.Field( proto.STRING, @@ -812,6 +848,15 @@ class AccessibilityOptions(proto.Message): proto.STRING, number=5, ) + primary_type: str = proto.Field( + proto.STRING, + number=50, + ) + primary_type_display_name: localized_text_pb2.LocalizedText = proto.Field( + proto.MESSAGE, + number=32, + message=localized_text_pb2.LocalizedText, + ) national_phone_number: str = proto.Field( proto.STRING, number=7, @@ -824,6 +869,10 @@ class AccessibilityOptions(proto.Message): proto.STRING, number=9, ) + short_formatted_address: str = proto.Field( + proto.STRING, + number=51, + ) address_components: MutableSequence[AddressComponent] = proto.RepeatedField( proto.MESSAGE, number=10, diff --git a/packages/google-maps-places/google/maps/places_v1/types/places_service.py b/packages/google-maps-places/google/maps/places_v1/types/places_service.py index faec30184cd1..07e4c7937378 100644 --- a/packages/google-maps-places/google/maps/places_v1/types/places_service.py +++ b/packages/google-maps-places/google/maps/places_v1/types/places_service.py @@ -58,12 +58,16 @@ class SearchNearbyRequest(proto.Message): applicable law. For more information, see - http://www.unicode.org/reports/tr35/#unicode_region_subtag. + https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. Note that 3-digit region codes are not currently supported. included_types (MutableSequence[str]): Included Place type (eg, "restaurant" or "gas_station") from - https://developers.google.com/places/supported_types. + https://developers.google.com/maps/documentation/places/web-service/place-types. + + Up to 50 types from `Table + A `__ + may be specified. If there are any conflicting types, i.e. a type appears in both included_types and excluded_types, an INVALID_ARGUMENT @@ -73,12 +77,16 @@ class SearchNearbyRequest(proto.Message): restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = - ["restaurant"]}, the returned places are POIs that provide - "restaurant" related services but do not operate primarily - as "restaurants". + ["restaurant"]}, the returned places provide "restaurant" + related services but do not operate primarily as + "restaurants". excluded_types (MutableSequence[str]): Excluded Place type (eg, "restaurant" or "gas_station") from - https://developers.google.com/places/supported_types. + https://developers.google.com/maps/documentation/places/web-service/place-types. + + Up to 50 types from `Table + A `__ + may be specified. If the client provides both included_types (e.g. restaurant) and excluded_types (e.g. cafe), then the response should @@ -94,13 +102,19 @@ class SearchNearbyRequest(proto.Message): restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = - ["restaurant"]}, the returned places are POIs that provide - "restaurant" related services but do not operate primarily - as "restaurants". 
+ ["restaurant"]}, the returned places provide "restaurant" + related services but do not operate primarily as + "restaurants". included_primary_types (MutableSequence[str]): Included primary Place type (e.g. "restaurant" or "gas_station") from - https://developers.google.com/places/supported_types. + https://developers.google.com/maps/documentation/places/web-service/place-types. + A place can only have a single primary type from the + supported types table associated with it. + + Up to 50 types from `Table + A `__ + may be specified. If there are any conflicting primary types, i.e. a type appears in both included_primary_types and @@ -111,13 +125,17 @@ class SearchNearbyRequest(proto.Message): restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = - ["restaurant"]}, the returned places are POIs that provide - "restaurant" related services but do not operate primarily - as "restaurants". + ["restaurant"]}, the returned places provide "restaurant" + related services but do not operate primarily as + "restaurants". excluded_primary_types (MutableSequence[str]): Excluded primary Place type (e.g. "restaurant" or "gas_station") from - https://developers.google.com/places/supported_types. + https://developers.google.com/maps/documentation/places/web-service/place-types. + + Up to 50 types from `Table + A `__ + may be specified. If there are any conflicting primary types, i.e. a type appears in both included_primary_types and @@ -128,15 +146,15 @@ class SearchNearbyRequest(proto.Message): restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = - ["restaurant"]}, the returned places are POIs that provide - "restaurant" related services but do not operate primarily - as "restaurants". + ["restaurant"]}, the returned places provide "restaurant" + related services but do not operate primarily as + "restaurants". max_result_count (int): Maximum number of results to return. It must be between 1 - and 20, inclusively. If the number is unset, it falls back - to the upper limit. If the number is set to negative or - exceeds the upper limit, an INVALID_ARGUMENT error is - returned. + and 20 (default), inclusively. If the number is unset, it + falls back to the upper limit. If the number is set to + negative or exceeds the upper limit, an INVALID_ARGUMENT + error is returned. location_restriction (google.maps.places_v1.types.SearchNearbyRequest.LocationRestriction): Required. The region to search. rank_preference (google.maps.places_v1.types.SearchNearbyRequest.RankPreference): @@ -223,9 +241,9 @@ class SearchNearbyResponse(proto.Message): Attributes: places (MutableSequence[google.maps.places_v1.types.Place]): - A list of interesting places that meets - user's requirements like places types, number of - places and specific location restriction. + A list of places that meets user's + requirements like places types, number of places + and specific location restriction. """ places: MutableSequence[place.Place] = proto.RepeatedField( @@ -259,33 +277,32 @@ class SearchTextRequest(proto.Message): applicable law. For more information, see - http://www.unicode.org/reports/tr35/#unicode_region_subtag. + https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. Note that 3-digit region codes are not currently supported. 
rank_preference (google.maps.places_v1.types.SearchTextRequest.RankPreference): How results will be ranked in the response. included_type (str): - The requested place type. Full list of types supported: - https://developers.google.com/places/supported_types. Only - support one included type. + The requested place type. Full list of types + supported: + https://developers.google.com/maps/documentation/places/web-service/place-types. + Only support one included type. open_now (bool): - Used to restrict the search to places that are open at a - specific time. open_now marks if a business is currently - open. + Used to restrict the search to places that + are currently open. The default is false. min_rating (float): Filter out results whose average user rating is strictly - less than this limit. A valid value must be an float between + less than this limit. A valid value must be a float between 0 and 5 (inclusively) at a 0.5 cadence i.e. [0, 0.5, 1.0, - ... , 5.0] inclusively. This is to keep parity with - LocalRefinement_UserRating. The input rating will round up - to the nearest 0.5(ceiling). For instance, a rating of 0.6 - will eliminate all results with a less than 1.0 rating. + ... , 5.0] inclusively. The input rating will round up to + the nearest 0.5(ceiling). For instance, a rating of 0.6 will + eliminate all results with a less than 1.0 rating. max_result_count (int): Maximum number of results to return. It must be between 1 - and 20, inclusively. If the number is unset, it falls back - to the upper limit. If the number is set to negative or - exceeds the upper limit, an INVALID_ARGUMENT error is - returned. + and 20, inclusively. The default is 20. If the number is + unset, it falls back to the upper limit. If the number is + set to negative or exceeds the upper limit, an + INVALID_ARGUMENT error is returned. price_levels (MutableSequence[google.maps.places_v1.types.PriceLevel]): Used to restrict the search to places that are marked as certain price levels. Users can @@ -316,8 +333,7 @@ class RankPreference(proto.Enum): Ranks results by distance. RELEVANCE (2): Ranks results by relevance. Sort order - determined by normal ranking stack. See - SortRefinement::RELEVANCE. + determined by normal ranking stack. """ RANK_PREFERENCE_UNSPECIFIED = 0 DISTANCE = 1 @@ -336,8 +352,14 @@ class LocationBias(proto.Message): Attributes: rectangle (google.geo.type.types.Viewport): - A rectangle box defined by northeast and - southwest corner. + A rectangle box defined by northeast and southwest corner. + ``rectangle.high()`` must be the northeast point of the + rectangle viewport. ``rectangle.low()`` must be the + southwest point of the rectangle viewport. + ``rectangle.low().latitude()`` cannot be greater than + ``rectangle.high().latitude()``. This will result in an + empty latitude range. A rectangle viewport cannot be wider + than 180 degrees. This field is a member of `oneof`_ ``type``. circle (google.maps.places_v1.types.Circle): @@ -368,8 +390,14 @@ class LocationRestriction(proto.Message): Attributes: rectangle (google.geo.type.types.Viewport): - A rectangle box defined by northeast and - southwest corner. + A rectangle box defined by northeast and southwest corner. + ``rectangle.high()`` must be the northeast point of the + rectangle viewport. ``rectangle.low()`` must be the + southwest point of the rectangle viewport. + ``rectangle.low().latitude()`` cannot be greater than + ``rectangle.high().latitude()``. This will result in an + empty latitude range. 
A rectangle viewport cannot be wider + than 180 degrees. This field is a member of `oneof`_ ``type``. """ @@ -457,9 +485,14 @@ class GetPhotoMediaRequest(proto.Message): Attributes: name (str): - Required. The resource name of a photo. It is returned in - Place's photos.name field. Format: - places//photos//media. + Required. The resource name of a photo media in the format: + ``places/{place_id}/photos/{photo_reference}/media``. + + The resource name of a photo as returned in a Place object's + ``photos.name`` field comes with the format + ``places/{place_id}/photos/{photo_reference}``. You need to + append ``/media`` at the end of the photo resource to get + the photo media resource name. max_width_px (int): Optional. Specifies the maximum desired width, in pixels, of the image. If the image is smaller than the values @@ -493,7 +526,7 @@ class GetPhotoMediaRequest(proto.Message): redirect behavior and render a text format (for example, in JSON format for HTTP use case) response. If not set, an HTTP redirect will be - issued to redirect the call to the image midea. + issued to redirect the call to the image media. This option is ignored for non-HTTP requests. """ @@ -520,9 +553,8 @@ class PhotoMedia(proto.Message): Attributes: name (str): - The resource name of a photo. It is returned in Place's - photos.name field. Format: - places//photos//media. + The resource name of a photo media in the format: + ``places/{place_id}/photos/{photo_reference}/media``. photo_uri (str): A short-lived uri that can be used to render the photo. @@ -544,9 +576,9 @@ class GetPlaceRequest(proto.Message): Attributes: name (str): - Required. A place_id returned in a Place (with "places/" + Required. A place ID returned in a Place (with "places/" prefix), or equivalently the name in the same Place. Format: - places/. + ``places/{place_id}``. language_code (str): Optional. Place details will be displayed with the preferred language if available. @@ -560,7 +592,7 @@ class GetPlaceRequest(proto.Message): used to display the place details, like region-specific place name, if available. The parameter can affect results based on applicable law. For more information, see - http://www.unicode.org/reports/tr35/#unicode_region_subtag. + https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. Note that 3-digit region codes are not currently supported. """ diff --git a/packages/google-maps-places/google/maps/places_v1/types/review.py b/packages/google-maps-places/google/maps/places_v1/types/review.py index b5df7ae538f2..2c13eba6824b 100644 --- a/packages/google-maps-places/google/maps/places_v1/types/review.py +++ b/packages/google-maps-places/google/maps/places_v1/types/review.py @@ -36,28 +36,24 @@ class Review(proto.Message): Attributes: name (str): - Output only. A reference representing this place review - which may be used to look up this place review again (a.k.a. - the API "resource" name: - places/{place_id}/reviews/{review}). + A reference representing this place review which may be used + to look up this place review again (also called the API + "resource" name: ``places/{place_id}/reviews/{review}``). relative_publish_time_description (str): - Output only. A string of formatted recent - time, expressing the review time relative to the - current time in a form appropriate for the - language and country. + A string of formatted recent time, expressing + the review time relative to the current time in + a form appropriate for the language and country. 
text (google.type.localized_text_pb2.LocalizedText): - Output only. The localized text of the - review. + The localized text of the review. original_text (google.type.localized_text_pb2.LocalizedText): - Output only. The review text in its original - language. + The review text in its original language. rating (float): - Output only. A number between 1.0 and 5.0, - a.k.a. the number of stars. + A number between 1.0 and 5.0, also called the + number of stars. author_attribution (google.maps.places_v1.types.AuthorAttribution): - Output only. This review's author. + This review's author. publish_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp for the review. + Timestamp for the review. """ name: str = proto.Field( diff --git a/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json b/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json index 8302a208b6e7..6444e295ff29 100644 --- a/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json +++ b/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-maps-places", - "version": "0.1.5" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py b/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py index f8946f734500..2c8e8323ef47 100644 --- a/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py +++ b/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py @@ -1094,9 +1094,11 @@ def test_get_place(request_type, transport: str = "grpc"): name="name_value", id="id_value", types=["types_value"], + primary_type="primary_type_value", national_phone_number="national_phone_number_value", international_phone_number="international_phone_number_value", formatted_address="formatted_address_value", + short_formatted_address="short_formatted_address_value", rating=0.645, google_maps_uri="google_maps_uri_value", website_uri="website_uri_value", @@ -1143,9 +1145,11 @@ def test_get_place(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.id == "id_value" assert response.types == ["types_value"] + assert response.primary_type == "primary_type_value" assert response.national_phone_number == "national_phone_number_value" assert response.international_phone_number == "international_phone_number_value" assert response.formatted_address == "formatted_address_value" + assert response.short_formatted_address == "short_formatted_address_value" assert math.isclose(response.rating, 0.645, rel_tol=1e-6) assert response.google_maps_uri == "google_maps_uri_value" assert response.website_uri == "website_uri_value" @@ -1218,9 +1222,11 @@ async def test_get_place_async( name="name_value", id="id_value", types=["types_value"], + primary_type="primary_type_value", national_phone_number="national_phone_number_value", international_phone_number="international_phone_number_value", formatted_address="formatted_address_value", + short_formatted_address="short_formatted_address_value", rating=0.645, google_maps_uri="google_maps_uri_value", website_uri="website_uri_value", @@ -1268,9 +1274,11 @@ async def test_get_place_async( assert response.name == "name_value" assert response.id == "id_value" assert response.types == ["types_value"] + assert response.primary_type == "primary_type_value" 
assert response.national_phone_number == "national_phone_number_value" assert response.international_phone_number == "international_phone_number_value" assert response.formatted_address == "formatted_address_value" + assert response.short_formatted_address == "short_formatted_address_value" assert math.isclose(response.rating, 0.645, rel_tol=1e-6) assert response.google_maps_uri == "google_maps_uri_value" assert response.website_uri == "website_uri_value" @@ -2175,9 +2183,11 @@ def test_get_place_rest(request_type): name="name_value", id="id_value", types=["types_value"], + primary_type="primary_type_value", national_phone_number="national_phone_number_value", international_phone_number="international_phone_number_value", formatted_address="formatted_address_value", + short_formatted_address="short_formatted_address_value", rating=0.645, google_maps_uri="google_maps_uri_value", website_uri="website_uri_value", @@ -2229,9 +2239,11 @@ def test_get_place_rest(request_type): assert response.name == "name_value" assert response.id == "id_value" assert response.types == ["types_value"] + assert response.primary_type == "primary_type_value" assert response.national_phone_number == "national_phone_number_value" assert response.international_phone_number == "international_phone_number_value" assert response.formatted_address == "formatted_address_value" + assert response.short_formatted_address == "short_formatted_address_value" assert math.isclose(response.rating, 0.645, rel_tol=1e-6) assert response.google_maps_uri == "google_maps_uri_value" assert response.website_uri == "website_uri_value" From d99f5b0ec5dcaa254bfa30dbf0495063a7a82374 Mon Sep 17 00:00:00 2001 From: "owlbot-bootstrapper[bot]" <104649659+owlbot-bootstrapper[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 21:31:58 +0000 Subject: [PATCH 03/80] feat: add initial files for google.apps.meet.v2beta (#12100) Source-Link: https://github.com/googleapis/googleapis-gen/commit/d1a3cba834ead4c8a93644dd84ff0d2b0fc57b25 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWFwcHMtbWVldC8uT3dsQm90LnlhbWwiLCJoIjoiZDFhM2NiYTgzNGVhZDRjOGE5MzY0NGRkODRmZjBkMmIwZmM1N2IyNSJ9 --------- Co-authored-by: Owlbot Bootstrapper Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- packages/google-apps-meet/.OwlBot.yaml | 18 + packages/google-apps-meet/.coveragerc | 13 + packages/google-apps-meet/.flake8 | 33 + packages/google-apps-meet/.gitignore | 63 + packages/google-apps-meet/.repo-metadata.json | 17 + packages/google-apps-meet/CHANGELOG.md | 1 + packages/google-apps-meet/CODE_OF_CONDUCT.md | 95 + packages/google-apps-meet/CONTRIBUTING.rst | 271 + packages/google-apps-meet/LICENSE | 202 + packages/google-apps-meet/MANIFEST.in | 25 + packages/google-apps-meet/README.rst | 108 + packages/google-apps-meet/docs/CHANGELOG.md | 1 + packages/google-apps-meet/docs/README.rst | 1 + .../google-apps-meet/docs/_static/custom.css | 20 + .../docs/_templates/layout.html | 50 + packages/google-apps-meet/docs/conf.py | 384 + packages/google-apps-meet/docs/index.rst | 23 + .../conference_records_service.rst | 10 + .../docs/meet_v2beta/services_.rst | 7 + .../docs/meet_v2beta/spaces_service.rst | 6 + .../docs/meet_v2beta/types_.rst | 6 + .../google-apps-meet/docs/multiprocessing.rst | 7 + .../google/apps/meet/__init__.py | 113 + .../google/apps/meet/gapic_version.py | 16 + .../google/apps/meet/py.typed | 2 + .../google/apps/meet_v2beta/__init__.py | 108 + .../apps/meet_v2beta/gapic_metadata.json | 287 + .../google/apps/meet_v2beta/gapic_version.py | 16 + 
.../google/apps/meet_v2beta/py.typed | 2 + .../apps/meet_v2beta/services/__init__.py | 15 + .../conference_records_service/__init__.py | 22 + .../async_client.py | 1696 +++ .../conference_records_service/client.py | 1906 ++++ .../conference_records_service/pagers.py | 795 ++ .../transports/__init__.py | 41 + .../transports/base.py | 433 + .../transports/grpc.py | 603 ++ .../transports/grpc_asyncio.py | 616 ++ .../transports/rest.py | 1718 +++ .../services/spaces_service/__init__.py | 22 + .../services/spaces_service/async_client.py | 653 ++ .../services/spaces_service/client.py | 882 ++ .../spaces_service/transports/__init__.py | 36 + .../spaces_service/transports/base.py | 204 + .../spaces_service/transports/grpc.py | 346 + .../spaces_service/transports/grpc_asyncio.py | 351 + .../spaces_service/transports/rest.py | 668 ++ .../google/apps/meet_v2beta/types/__init__.py | 94 + .../google/apps/meet_v2beta/types/resource.py | 637 ++ .../google/apps/meet_v2beta/types/service.py | 615 ++ packages/google-apps-meet/mypy.ini | 3 + packages/google-apps-meet/noxfile.py | 410 + ...rds_service_get_conference_record_async.py | 52 + ...ords_service_get_conference_record_sync.py | 52 + ...e_records_service_get_participant_async.py | 52 + ...s_service_get_participant_session_async.py | 52 + ...ds_service_get_participant_session_sync.py | 52 + ...ce_records_service_get_participant_sync.py | 52 + ...nce_records_service_get_recording_async.py | 52 + ...ence_records_service_get_recording_sync.py | 52 + ...ce_records_service_get_transcript_async.py | 52 + ...ords_service_get_transcript_entry_async.py | 52 + ...cords_service_get_transcript_entry_sync.py | 52 + ...nce_records_service_get_transcript_sync.py | 52 + ...s_service_list_conference_records_async.py | 52 + ...ds_service_list_conference_records_sync.py | 52 + ...service_list_participant_sessions_async.py | 53 + ..._service_list_participant_sessions_sync.py | 53 + ...records_service_list_participants_async.py | 53 + ..._records_service_list_participants_sync.py | 53 + ...e_records_service_list_recordings_async.py | 53 + ...ce_records_service_list_recordings_sync.py | 53 + ...s_service_list_transcript_entries_async.py | 53 + ...ds_service_list_transcript_entries_sync.py | 53 + ..._records_service_list_transcripts_async.py | 53 + ...e_records_service_list_transcripts_sync.py | 53 + ...rated_spaces_service_create_space_async.py | 51 + ...erated_spaces_service_create_space_sync.py | 51 + ...ces_service_end_active_conference_async.py | 50 + ...aces_service_end_active_conference_sync.py | 50 + ...enerated_spaces_service_get_space_async.py | 52 + ...generated_spaces_service_get_space_sync.py | 52 + ...rated_spaces_service_update_space_async.py | 51 + ...erated_spaces_service_update_space_sync.py | 51 + ...ppet_metadata_google.apps.meet.v2beta.json | 2585 +++++ .../scripts/decrypt-secrets.sh | 46 + .../scripts/fixup_meet_v2beta_keywords.py | 191 + packages/google-apps-meet/setup.py | 90 + packages/google-apps-meet/testing/.gitignore | 3 + .../testing/constraints-3.10.txt | 6 + .../testing/constraints-3.11.txt | 6 + .../testing/constraints-3.12.txt | 6 + .../testing/constraints-3.7.txt | 9 + .../testing/constraints-3.8.txt | 6 + .../testing/constraints-3.9.txt | 6 + packages/google-apps-meet/tests/__init__.py | 15 + .../google-apps-meet/tests/unit/__init__.py | 15 + .../tests/unit/gapic/__init__.py | 15 + .../tests/unit/gapic/meet_v2beta/__init__.py | 15 + .../test_conference_records_service.py | 9169 +++++++++++++++++ .../gapic/meet_v2beta/test_spaces_service.py 
| 3495 +++++++ 101 files changed, 32015 insertions(+) create mode 100644 packages/google-apps-meet/.OwlBot.yaml create mode 100644 packages/google-apps-meet/.coveragerc create mode 100644 packages/google-apps-meet/.flake8 create mode 100644 packages/google-apps-meet/.gitignore create mode 100644 packages/google-apps-meet/.repo-metadata.json create mode 100644 packages/google-apps-meet/CHANGELOG.md create mode 100644 packages/google-apps-meet/CODE_OF_CONDUCT.md create mode 100644 packages/google-apps-meet/CONTRIBUTING.rst create mode 100644 packages/google-apps-meet/LICENSE create mode 100644 packages/google-apps-meet/MANIFEST.in create mode 100644 packages/google-apps-meet/README.rst create mode 120000 packages/google-apps-meet/docs/CHANGELOG.md create mode 120000 packages/google-apps-meet/docs/README.rst create mode 100644 packages/google-apps-meet/docs/_static/custom.css create mode 100644 packages/google-apps-meet/docs/_templates/layout.html create mode 100644 packages/google-apps-meet/docs/conf.py create mode 100644 packages/google-apps-meet/docs/index.rst create mode 100644 packages/google-apps-meet/docs/meet_v2beta/conference_records_service.rst create mode 100644 packages/google-apps-meet/docs/meet_v2beta/services_.rst create mode 100644 packages/google-apps-meet/docs/meet_v2beta/spaces_service.rst create mode 100644 packages/google-apps-meet/docs/meet_v2beta/types_.rst create mode 100644 packages/google-apps-meet/docs/multiprocessing.rst create mode 100644 packages/google-apps-meet/google/apps/meet/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet/gapic_version.py create mode 100644 packages/google-apps-meet/google/apps/meet/py.typed create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/gapic_metadata.json create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/py.typed create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/async_client.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/client.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/pagers.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/base.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc_asyncio.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/rest.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/async_client.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/client.py create mode 100644 
packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/base.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc_asyncio.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/rest.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/types/__init__.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/types/resource.py create mode 100644 packages/google-apps-meet/google/apps/meet_v2beta/types/service.py create mode 100644 packages/google-apps-meet/mypy.ini create mode 100644 packages/google-apps-meet/noxfile.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_async.py create mode 100644 
packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_async.py create mode 100644 packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_sync.py create mode 100644 packages/google-apps-meet/samples/generated_samples/snippet_metadata_google.apps.meet.v2beta.json create mode 100755 packages/google-apps-meet/scripts/decrypt-secrets.sh create mode 100644 packages/google-apps-meet/scripts/fixup_meet_v2beta_keywords.py create mode 100644 packages/google-apps-meet/setup.py create mode 100644 packages/google-apps-meet/testing/.gitignore create mode 100644 packages/google-apps-meet/testing/constraints-3.10.txt create mode 100644 packages/google-apps-meet/testing/constraints-3.11.txt create mode 100644 packages/google-apps-meet/testing/constraints-3.12.txt create mode 100644 packages/google-apps-meet/testing/constraints-3.7.txt create mode 100644 packages/google-apps-meet/testing/constraints-3.8.txt create mode 100644 packages/google-apps-meet/testing/constraints-3.9.txt create mode 100644 packages/google-apps-meet/tests/__init__.py create mode 100644 packages/google-apps-meet/tests/unit/__init__.py create mode 100644 packages/google-apps-meet/tests/unit/gapic/__init__.py create mode 100644 packages/google-apps-meet/tests/unit/gapic/meet_v2beta/__init__.py create mode 100644 packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_conference_records_service.py create mode 100644 packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_spaces_service.py diff --git a/packages/google-apps-meet/.OwlBot.yaml b/packages/google-apps-meet/.OwlBot.yaml new file mode 100644 index 000000000000..59a80ef89e64 --- /dev/null +++ 
b/packages/google-apps-meet/.OwlBot.yaml @@ -0,0 +1,18 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +deep-copy-regex: + - source: /google/apps/meet/(v.*)/.*-py + dest: /owl-bot-staging/google-apps-meet/$1 +api-name: google-apps-meet diff --git a/packages/google-apps-meet/.coveragerc b/packages/google-apps-meet/.coveragerc new file mode 100644 index 000000000000..290aec60f0e6 --- /dev/null +++ b/packages/google-apps-meet/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/apps/meet/__init__.py + google/apps/meet/gapic_version.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/packages/google-apps-meet/.flake8 b/packages/google-apps-meet/.flake8 new file mode 100644 index 000000000000..87f6e408c47d --- /dev/null +++ b/packages/google-apps-meet/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E231, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. + **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/packages/google-apps-meet/.gitignore b/packages/google-apps-meet/.gitignore new file mode 100644 index 000000000000..b4243ced74e4 --- /dev/null +++ b/packages/google-apps-meet/.gitignore @@ -0,0 +1,63 @@ +*.py[cod] +*.sw[op] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +.eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 +__pycache__ + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.nox +.cache +.pytest_cache + + +# Mac +.DS_Store + +# JetBrains +.idea + +# VS Code +.vscode + +# emacs +*~ + +# Built documentation +docs/_build +bigquery/docs/generated +docs.metadata + +# Virtual environment +env/ + +# Test logs +coverage.xml +*sponge_log.xml + +# System test environment variables. +system_tests/local_test_setup + +# Make sure a generated file isn't accidentally committed. 
+pylintrc +pylintrc.test diff --git a/packages/google-apps-meet/.repo-metadata.json b/packages/google-apps-meet/.repo-metadata.json new file mode 100644 index 000000000000..f41d481b40a0 --- /dev/null +++ b/packages/google-apps-meet/.repo-metadata.json @@ -0,0 +1,17 @@ +{ + "name": "google-apps-meet", + "name_pretty": "Google Meet API", + "api_description": "Create and manage meetings in Google Meet.", + "product_documentation": "https://developers.google.com/meet/api/guides/overview", + "client_documentation": "https://googleapis.dev/python/google-apps-meet/latest", + "issue_tracker": "https://issuetracker.google.com/issues/new?component=1216362&template=1766418", + "release_level": "preview", + "language": "python", + "library_type": "GAPIC_AUTO", + "repo": "googleapis/google-cloud-python", + "distribution_name": "google-apps-meet", + "api_id": "meet.googleapis.com", + "default_version": "v2beta", + "codeowner_team": "", + "api_shortname": "meet" +} diff --git a/packages/google-apps-meet/CHANGELOG.md b/packages/google-apps-meet/CHANGELOG.md new file mode 100644 index 000000000000..5ddad421e08f --- /dev/null +++ b/packages/google-apps-meet/CHANGELOG.md @@ -0,0 +1 @@ +# Changelog \ No newline at end of file diff --git a/packages/google-apps-meet/CODE_OF_CONDUCT.md b/packages/google-apps-meet/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..039f43681204 --- /dev/null +++ b/packages/google-apps-meet/CODE_OF_CONDUCT.md @@ -0,0 +1,95 @@ + +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + + +Reports should be directed to *googleapis-stewards@google.com*, the +Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html \ No newline at end of file diff --git a/packages/google-apps-meet/CONTRIBUTING.rst b/packages/google-apps-meet/CONTRIBUTING.rst new file mode 100644 index 000000000000..31df2453a561 --- /dev/null +++ b/packages/google-apps-meet/CONTRIBUTING.rst @@ -0,0 +1,271 @@ +.. Generated by synthtool. DO NOT EDIT! +############ +Contributing +############ + +#. **Please sign one of the contributor license agreements below.** +#. Fork the repo, develop and test your code changes, add docs. +#. Make sure that your commit messages clearly describe the changes. +#. Send a pull request. (Please Read: `Faster Pull Request Reviews`_) + +.. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews + +.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries. 
+ +*************** +Adding Features +*************** + +In order to add a feature: + +- The feature must be documented in both the API and narrative + documentation. + +- The feature must work fully on the following CPython versions: + 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows. + +- The feature must not add unnecessary dependencies (where + "unnecessary" is of course subjective, but new dependencies should + be discussed). + +**************************** +Using a Development Checkout +**************************** + +You'll have to create a development environment using a Git checkout: + +- While logged into your GitHub account, navigate to the + ``google-cloud-python`` `repo`_ on GitHub. + +- Fork and clone the ``google-cloud-python`` repository to your GitHub account by + clicking the "Fork" button. + +- Clone your fork of ``google-cloud-python`` from your GitHub account to your local + computer, substituting your account username and specifying the destination + as ``hack-on-google-cloud-python``. E.g.:: + + $ cd ${HOME} + $ git clone git@github.com:USERNAME/google-cloud-python.git hack-on-google-cloud-python + $ cd hack-on-google-cloud-python + # Configure remotes such that you can pull changes from the googleapis/google-cloud-python + # repository into your local repository. + $ git remote add upstream git@github.com:googleapis/google-cloud-python.git + # fetch and merge changes from upstream into main + $ git fetch upstream + $ git merge upstream/main + +Now your local repo is set up such that you will push changes to your GitHub +repo, from which you can submit a pull request. + +To work on the codebase and run the tests, we recommend using ``nox``, +but you can also use a ``virtualenv`` of your own creation. + +.. _repo: https://github.com/googleapis/google-cloud-python + +Using ``nox`` +============= + +We use `nox `__ to instrument our tests. + +- To test your changes, run unit tests with ``nox``:: + $ nox -s unit + +- To run a single unit test:: + + $ nox -s unit-3.12 -- -k + + + .. note:: + + The unit tests and system tests are described in the + ``noxfile.py`` files in each directory. + +.. nox: https://pypi.org/project/nox/ + +***************************************** +I'm getting weird errors... Can you help? +***************************************** + +If the error mentions ``Python.h`` not being found, +install ``python-dev`` and try again. +On Debian/Ubuntu:: + + $ sudo apt-get install python-dev + +************ +Coding Style +************ +- We use the automatic code formatter ``black``. You can run it using + the nox session ``blacken``. This will eliminate many lint errors. Run via:: + + $ nox -s blacken + +- PEP8 compliance is required, with exceptions defined in the linter configuration. + If you have ``nox`` installed, you can test that you have not introduced + any non-compliant code via:: + + $ nox -s lint + +- In order to make ``nox -s lint`` run faster, you can set some environment + variables:: + + export GOOGLE_CLOUD_TESTING_REMOTE="upstream" + export GOOGLE_CLOUD_TESTING_BRANCH="main" + + By doing this, you are specifying the location of the most up-to-date + version of ``google-cloud-python``. The + remote name ``upstream`` should point to the official ``googleapis`` + checkout and the branch should be the default branch on that remote (``main``). + +- This repository contains configuration for the + `pre-commit `__ tool, which automates checking + our linters during a commit. 
If you have it installed on your ``$PATH``, + you can enable enforcing those checks via: + +.. code-block:: bash + + $ pre-commit install + pre-commit installed at .git/hooks/pre-commit + +Exceptions to PEP8: + +- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for + "Function-Under-Test"), which is PEP8-incompliant, but more readable. + Some also use a local variable, ``MUT`` (short for "Module-Under-Test"). + +******************** +Running System Tests +******************** + +- To run system tests, you can execute:: + + # Run all system tests + $ nox -s system + + # Run a single system test + $ nox -s system-3.12 -- -k + + + .. note:: + + System tests are only configured to run under Python 3.8, 3.9, 3.10, 3.11 and 3.12. + For expediency, we do not run them in older versions of Python 3. + + This alone will not run the tests. You'll need to change some local + auth settings and change some configuration in your project to + run all the tests. + +- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication `__. Some tests require a service account. For those tests see `Authenticating as a service account `__. + +************* +Test Coverage +************* + +- The codebase *must* have 100% test statement coverage after each commit. + You can test coverage via ``nox -s cover``. + +****************************************************** +Documentation Coverage and Building HTML Documentation +****************************************************** + +If you fix a bug, and the bug requires an API or behavior modification, all +documentation in this package which references that API or behavior must be +changed to reflect the bug fix, ideally in the same commit that fixes the bug +or adds the feature. + +Build the docs via: + + $ nox -s docs + +************************* +Samples and code snippets +************************* + +Code samples and snippets live in the `samples/` catalogue. Feel free to +provide more examples, but make sure to write tests for those examples. +Each folder containing example code requires its own `noxfile.py` script +which automates testing. If you decide to create a new folder, you can +base it on the `samples/snippets` folder (providing `noxfile.py` and +the requirements files). + +The tests will run against a real Google Cloud Project, so you should +configure them just like the System Tests. + +- To run sample tests, you can execute:: + + # Run all tests in a folder + $ cd samples/snippets + $ nox -s py-3.8 + + # Run a single sample test + $ cd samples/snippets + $ nox -s py-3.8 -- -k + +******************************************** +Note About ``README`` as it pertains to PyPI +******************************************** + +The `description on PyPI`_ for the project comes directly from the +``README``. Due to the reStructuredText (``rst``) parser used by +PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst`` +instead of +``https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst``) +may cause problems creating links or rendering the description. + +.. _description on PyPI: https://pypi.org/project/google-apps-meet + + +************************* +Supported Python Versions +************************* + +We support: + +- `Python 3.7`_ +- `Python 3.8`_ +- `Python 3.9`_ +- `Python 3.10`_ +- `Python 3.11`_ +- `Python 3.12`_ + +.. _Python 3.7: https://docs.python.org/3.7/ +.. 
_Python 3.8: https://docs.python.org/3.8/ +.. _Python 3.9: https://docs.python.org/3.9/ +.. _Python 3.10: https://docs.python.org/3.10/ +.. _Python 3.11: https://docs.python.org/3.11/ +.. _Python 3.12: https://docs.python.org/3.12/ + + +Supported versions can be found in our ``noxfile.py`` `config`_. + +.. _config: https://github.com/googleapis/google-cloud-python/blob/main/packages/google-apps-meet/noxfile.py + + +********** +Versioning +********** + +This library follows `Semantic Versioning`_. + +.. _Semantic Versioning: http://semver.org/ + +Some packages are currently in major version zero (``0.y.z``), which means that +anything may change at any time and the public API should not be considered +stable. + +****************************** +Contributor License Agreements +****************************** + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the + intellectual property**, then you'll need to sign an + `individual CLA `__. +- **If you work for a company that wants to allow you to contribute your work**, + then you'll need to sign a + `corporate CLA `__. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. diff --git a/packages/google-apps-meet/LICENSE b/packages/google-apps-meet/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/packages/google-apps-meet/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/packages/google-apps-meet/MANIFEST.in b/packages/google-apps-meet/MANIFEST.in new file mode 100644 index 000000000000..e0a66705318e --- /dev/null +++ b/packages/google-apps-meet/MANIFEST.in @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! 
+include README.rst LICENSE +recursive-include google *.json *.proto py.typed +recursive-include tests * +global-exclude *.py[co] +global-exclude __pycache__ + +# Exclude scripts for samples readmegen +prune scripts/readme-gen diff --git a/packages/google-apps-meet/README.rst b/packages/google-apps-meet/README.rst new file mode 100644 index 000000000000..f8d80407f4e1 --- /dev/null +++ b/packages/google-apps-meet/README.rst @@ -0,0 +1,108 @@ +Python Client for Google Meet API +================================= + +|preview| |pypi| |versions| + +`Google Meet API`_: Create and manage meetings in Google Meet. + +- `Client Library Documentation`_ +- `Product Documentation`_ + +.. |preview| image:: https://img.shields.io/badge/support-preview-orange.svg + :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#stability-levels +.. |pypi| image:: https://img.shields.io/pypi/v/google-apps-meet.svg + :target: https://pypi.org/project/google-apps-meet/ +.. |versions| image:: https://img.shields.io/pypi/pyversions/google-apps-meet.svg + :target: https://pypi.org/project/google-apps-meet/ +.. _Google Meet API: https://developers.google.com/meet/api/guides/overview +.. _Client Library Documentation: https://googleapis.dev/python/google-apps-meet/latest +.. _Product Documentation: https://developers.google.com/meet/api/guides/overview + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. `Enable the Google Meet API.`_ +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Enable the Google Meet API.: https://developers.google.com/meet/api/guides/overview +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a virtual environment using `venv`_. `venv`_ is a tool that +creates isolated Python environments. These isolated environments can have separate +versions of Python packages, which allows you to isolate one project's dependencies +from the dependencies of other projects. + +With `venv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`venv`: https://docs.python.org/3/library/venv.html + + +Code samples and snippets +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Code samples and snippets live in the `samples/`_ folder. + +.. _samples/: https://github.com/googleapis/google-cloud-python/tree/main/packages/google-apps-meet/samples + + +Supported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^ +Our client libraries are compatible with all current `active`_ and `maintenance`_ versions of +Python. + +Python >= 3.7 + +.. _active: https://devguide.python.org/devcycle/#in-development-main-branch +.. _maintenance: https://devguide.python.org/devcycle/#maintenance-branches + +Unsupported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python <= 3.6 + +If you are using an `end-of-life`_ +version of Python, we recommend that you update as soon as possible to an actively supported version. + +.. _end-of-life: https://devguide.python.org/devcycle/#end-of-life-branches + +Mac/Linux +^^^^^^^^^ + +.. 
code-block:: console + + python3 -m venv + source /bin/activate + pip install google-apps-meet + + +Windows +^^^^^^^ + +.. code-block:: console + + py -m venv + .\\Scripts\activate + pip install google-apps-meet + +Next Steps +~~~~~~~~~~ + +- Read the `Client Library Documentation`_ for Google Meet API + to see other available methods on the client. +- Read the `Google Meet API Product documentation`_ to learn + more about the product and see How-to Guides. +- View this `README`_ to see the full list of Cloud + APIs that we cover. + +.. _Google Meet API Product documentation: https://developers.google.com/meet/api/guides/overview +.. _README: https://github.com/googleapis/google-cloud-python/blob/main/README.rst diff --git a/packages/google-apps-meet/docs/CHANGELOG.md b/packages/google-apps-meet/docs/CHANGELOG.md new file mode 120000 index 000000000000..04c99a55caae --- /dev/null +++ b/packages/google-apps-meet/docs/CHANGELOG.md @@ -0,0 +1 @@ +../CHANGELOG.md \ No newline at end of file diff --git a/packages/google-apps-meet/docs/README.rst b/packages/google-apps-meet/docs/README.rst new file mode 120000 index 000000000000..89a0106941ff --- /dev/null +++ b/packages/google-apps-meet/docs/README.rst @@ -0,0 +1 @@ +../README.rst \ No newline at end of file diff --git a/packages/google-apps-meet/docs/_static/custom.css b/packages/google-apps-meet/docs/_static/custom.css new file mode 100644 index 000000000000..b0a295464b23 --- /dev/null +++ b/packages/google-apps-meet/docs/_static/custom.css @@ -0,0 +1,20 @@ +div#python2-eol { + border-color: red; + border-width: medium; +} + +/* Ensure minimum width for 'Parameters' / 'Returns' column */ +dl.field-list > dt { + min-width: 100px +} + +/* Insert space between methods for readability */ +dl.method { + padding-top: 10px; + padding-bottom: 10px +} + +/* Insert empty space between classes */ +dl.class { + padding-bottom: 50px +} diff --git a/packages/google-apps-meet/docs/_templates/layout.html b/packages/google-apps-meet/docs/_templates/layout.html new file mode 100644 index 000000000000..6316a537f72b --- /dev/null +++ b/packages/google-apps-meet/docs/_templates/layout.html @@ -0,0 +1,50 @@ + +{% extends "!layout.html" %} +{%- block content %} +{%- if theme_fixed_sidebar|lower == 'true' %} +
+ {{ sidebar() }} + {%- block document %} +
+ {%- if render_sidebar %} +
+ {%- endif %} + + {%- block relbar_top %} + {%- if theme_show_relbar_top|tobool %} + + {%- endif %} + {% endblock %} + +
+
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version. + Library versions released prior to that date will continue to be available. For more information please + visit Python 2 support on Google Cloud. +
+ {% block body %} {% endblock %} +
+ + {%- block relbar_bottom %} + {%- if theme_show_relbar_bottom|tobool %} + + {%- endif %} + {% endblock %} + + {%- if render_sidebar %} +
+ {%- endif %} +
+ {%- endblock %} +
+
+{%- else %} +{{ super() }} +{%- endif %} +{%- endblock %} diff --git a/packages/google-apps-meet/docs/conf.py b/packages/google-apps-meet/docs/conf.py new file mode 100644 index 000000000000..cfa92fb8d428 --- /dev/null +++ b/packages/google-apps-meet/docs/conf.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# google-apps-meet documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import shlex +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +# For plugins that can not read conf.py. +# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 +sys.path.insert(0, os.path.abspath(".")) + +__version__ = "" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.5.5" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.doctest", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "recommonmark", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_options = {"members": True} +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = "google-apps-meet" +copyright = "2019, Google" +author = "Google APIs" + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. 
+# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [ + "_build", + "**/.nox/**/*", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/README.rst", +] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for google-apps-meet", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-apps-meet-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-apps-meet.tex", + "google-apps-meet Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. 
+# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-apps-meet", + "google-apps-meet Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-apps-meet", + "google-apps-meet Documentation", + author, + "google-apps-meet", + "google-apps-meet Library", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("https://python.readthedocs.org/en/latest/", None), + "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), + "google.api_core": ( + "https://googleapis.dev/python/google-api-core/latest/", + None, + ), + "grpc": ("https://grpc.github.io/grpc/python/", None), + "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/packages/google-apps-meet/docs/index.rst b/packages/google-apps-meet/docs/index.rst new file mode 100644 index 000000000000..e8e59615e6c8 --- /dev/null +++ b/packages/google-apps-meet/docs/index.rst @@ -0,0 +1,23 @@ +.. include:: README.rst + +.. include:: multiprocessing.rst + + +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + meet_v2beta/services_ + meet_v2beta/types_ + + +Changelog +--------- + +For a list of all ``google-apps-meet`` releases: + +.. toctree:: + :maxdepth: 2 + + CHANGELOG diff --git a/packages/google-apps-meet/docs/meet_v2beta/conference_records_service.rst b/packages/google-apps-meet/docs/meet_v2beta/conference_records_service.rst new file mode 100644 index 000000000000..686a355da5cb --- /dev/null +++ b/packages/google-apps-meet/docs/meet_v2beta/conference_records_service.rst @@ -0,0 +1,10 @@ +ConferenceRecordsService +------------------------------------------ + +.. automodule:: google.apps.meet_v2beta.services.conference_records_service + :members: + :inherited-members: + +.. 
automodule:: google.apps.meet_v2beta.services.conference_records_service.pagers + :members: + :inherited-members: diff --git a/packages/google-apps-meet/docs/meet_v2beta/services_.rst b/packages/google-apps-meet/docs/meet_v2beta/services_.rst new file mode 100644 index 000000000000..d64aa084b6c5 --- /dev/null +++ b/packages/google-apps-meet/docs/meet_v2beta/services_.rst @@ -0,0 +1,7 @@ +Services for Google Apps Meet v2beta API +======================================== +.. toctree:: + :maxdepth: 2 + + conference_records_service + spaces_service diff --git a/packages/google-apps-meet/docs/meet_v2beta/spaces_service.rst b/packages/google-apps-meet/docs/meet_v2beta/spaces_service.rst new file mode 100644 index 000000000000..3e799ddd3d66 --- /dev/null +++ b/packages/google-apps-meet/docs/meet_v2beta/spaces_service.rst @@ -0,0 +1,6 @@ +SpacesService +------------------------------- + +.. automodule:: google.apps.meet_v2beta.services.spaces_service + :members: + :inherited-members: diff --git a/packages/google-apps-meet/docs/meet_v2beta/types_.rst b/packages/google-apps-meet/docs/meet_v2beta/types_.rst new file mode 100644 index 000000000000..524ba9eec204 --- /dev/null +++ b/packages/google-apps-meet/docs/meet_v2beta/types_.rst @@ -0,0 +1,6 @@ +Types for Google Apps Meet v2beta API +===================================== + +.. automodule:: google.apps.meet_v2beta.types + :members: + :show-inheritance: diff --git a/packages/google-apps-meet/docs/multiprocessing.rst b/packages/google-apps-meet/docs/multiprocessing.rst new file mode 100644 index 000000000000..536d17b2ea65 --- /dev/null +++ b/packages/google-apps-meet/docs/multiprocessing.rst @@ -0,0 +1,7 @@ +.. note:: + + Because this client uses :mod:`grpc` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.pool.Pool` or + :class:`multiprocessing.Process`. diff --git a/packages/google-apps-meet/google/apps/meet/__init__.py b/packages/google-apps-meet/google/apps/meet/__init__.py new file mode 100644 index 000000000000..2769401e90ca --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet/__init__.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.apps.meet import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.apps.meet_v2beta.services.conference_records_service.async_client import ( + ConferenceRecordsServiceAsyncClient, +) +from google.apps.meet_v2beta.services.conference_records_service.client import ( + ConferenceRecordsServiceClient, +) +from google.apps.meet_v2beta.services.spaces_service.async_client import ( + SpacesServiceAsyncClient, +) +from google.apps.meet_v2beta.services.spaces_service.client import SpacesServiceClient +from google.apps.meet_v2beta.types.resource import ( + ActiveConference, + AnonymousUser, + ConferenceRecord, + DocsDestination, + DriveDestination, + Participant, + ParticipantSession, + PhoneUser, + Recording, + SignedinUser, + Space, + SpaceConfig, + Transcript, + TranscriptEntry, +) +from google.apps.meet_v2beta.types.service import ( + CreateSpaceRequest, + EndActiveConferenceRequest, + GetConferenceRecordRequest, + GetParticipantRequest, + GetParticipantSessionRequest, + GetRecordingRequest, + GetSpaceRequest, + GetTranscriptEntryRequest, + GetTranscriptRequest, + ListConferenceRecordsRequest, + ListConferenceRecordsResponse, + ListParticipantSessionsRequest, + ListParticipantSessionsResponse, + ListParticipantsRequest, + ListParticipantsResponse, + ListRecordingsRequest, + ListRecordingsResponse, + ListTranscriptEntriesRequest, + ListTranscriptEntriesResponse, + ListTranscriptsRequest, + ListTranscriptsResponse, + UpdateSpaceRequest, +) + +__all__ = ( + "ConferenceRecordsServiceClient", + "ConferenceRecordsServiceAsyncClient", + "SpacesServiceClient", + "SpacesServiceAsyncClient", + "ActiveConference", + "AnonymousUser", + "ConferenceRecord", + "DocsDestination", + "DriveDestination", + "Participant", + "ParticipantSession", + "PhoneUser", + "Recording", + "SignedinUser", + "Space", + "SpaceConfig", + "Transcript", + "TranscriptEntry", + "CreateSpaceRequest", + "EndActiveConferenceRequest", + "GetConferenceRecordRequest", + "GetParticipantRequest", + "GetParticipantSessionRequest", + "GetRecordingRequest", + "GetSpaceRequest", + "GetTranscriptEntryRequest", + "GetTranscriptRequest", + "ListConferenceRecordsRequest", + "ListConferenceRecordsResponse", + "ListParticipantSessionsRequest", + "ListParticipantSessionsResponse", + "ListParticipantsRequest", + "ListParticipantsResponse", + "ListRecordingsRequest", + "ListRecordingsResponse", + "ListTranscriptEntriesRequest", + "ListTranscriptEntriesResponse", + "ListTranscriptsRequest", + "ListTranscriptsResponse", + "UpdateSpaceRequest", +) diff --git a/packages/google-apps-meet/google/apps/meet/gapic_version.py b/packages/google-apps-meet/google/apps/meet/gapic_version.py new file mode 100644 index 000000000000..360a0d13ebdd --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-apps-meet/google/apps/meet/py.typed b/packages/google-apps-meet/google/apps/meet/py.typed new file mode 100644 index 000000000000..6bbef0e04410 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-apps-meet package uses inline types. diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/__init__.py new file mode 100644 index 000000000000..4acf3b15228e --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/__init__.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.apps.meet_v2beta import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.conference_records_service import ( + ConferenceRecordsServiceAsyncClient, + ConferenceRecordsServiceClient, +) +from .services.spaces_service import SpacesServiceAsyncClient, SpacesServiceClient +from .types.resource import ( + ActiveConference, + AnonymousUser, + ConferenceRecord, + DocsDestination, + DriveDestination, + Participant, + ParticipantSession, + PhoneUser, + Recording, + SignedinUser, + Space, + SpaceConfig, + Transcript, + TranscriptEntry, +) +from .types.service import ( + CreateSpaceRequest, + EndActiveConferenceRequest, + GetConferenceRecordRequest, + GetParticipantRequest, + GetParticipantSessionRequest, + GetRecordingRequest, + GetSpaceRequest, + GetTranscriptEntryRequest, + GetTranscriptRequest, + ListConferenceRecordsRequest, + ListConferenceRecordsResponse, + ListParticipantSessionsRequest, + ListParticipantSessionsResponse, + ListParticipantsRequest, + ListParticipantsResponse, + ListRecordingsRequest, + ListRecordingsResponse, + ListTranscriptEntriesRequest, + ListTranscriptEntriesResponse, + ListTranscriptsRequest, + ListTranscriptsResponse, + UpdateSpaceRequest, +) + +__all__ = ( + "ConferenceRecordsServiceAsyncClient", + "SpacesServiceAsyncClient", + "ActiveConference", + "AnonymousUser", + "ConferenceRecord", + "ConferenceRecordsServiceClient", + "CreateSpaceRequest", + "DocsDestination", + "DriveDestination", + "EndActiveConferenceRequest", + "GetConferenceRecordRequest", + "GetParticipantRequest", + "GetParticipantSessionRequest", + "GetRecordingRequest", + "GetSpaceRequest", + "GetTranscriptEntryRequest", + "GetTranscriptRequest", + "ListConferenceRecordsRequest", + "ListConferenceRecordsResponse", + "ListParticipantSessionsRequest", + "ListParticipantSessionsResponse", + "ListParticipantsRequest", + "ListParticipantsResponse", + "ListRecordingsRequest", + "ListRecordingsResponse", + "ListTranscriptEntriesRequest", + "ListTranscriptEntriesResponse", + "ListTranscriptsRequest", + "ListTranscriptsResponse", + "Participant", + "ParticipantSession", + "PhoneUser", + "Recording", + "SignedinUser", + "Space", + "SpaceConfig", + "SpacesServiceClient", + 
"Transcript", + "TranscriptEntry", + "UpdateSpaceRequest", +) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/gapic_metadata.json b/packages/google-apps-meet/google/apps/meet_v2beta/gapic_metadata.json new file mode 100644 index 000000000000..07fcccd295b7 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/gapic_metadata.json @@ -0,0 +1,287 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.apps.meet_v2beta", + "protoPackage": "google.apps.meet.v2beta", + "schema": "1.0", + "services": { + "ConferenceRecordsService": { + "clients": { + "grpc": { + "libraryClient": "ConferenceRecordsServiceClient", + "rpcs": { + "GetConferenceRecord": { + "methods": [ + "get_conference_record" + ] + }, + "GetParticipant": { + "methods": [ + "get_participant" + ] + }, + "GetParticipantSession": { + "methods": [ + "get_participant_session" + ] + }, + "GetRecording": { + "methods": [ + "get_recording" + ] + }, + "GetTranscript": { + "methods": [ + "get_transcript" + ] + }, + "GetTranscriptEntry": { + "methods": [ + "get_transcript_entry" + ] + }, + "ListConferenceRecords": { + "methods": [ + "list_conference_records" + ] + }, + "ListParticipantSessions": { + "methods": [ + "list_participant_sessions" + ] + }, + "ListParticipants": { + "methods": [ + "list_participants" + ] + }, + "ListRecordings": { + "methods": [ + "list_recordings" + ] + }, + "ListTranscriptEntries": { + "methods": [ + "list_transcript_entries" + ] + }, + "ListTranscripts": { + "methods": [ + "list_transcripts" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ConferenceRecordsServiceAsyncClient", + "rpcs": { + "GetConferenceRecord": { + "methods": [ + "get_conference_record" + ] + }, + "GetParticipant": { + "methods": [ + "get_participant" + ] + }, + "GetParticipantSession": { + "methods": [ + "get_participant_session" + ] + }, + "GetRecording": { + "methods": [ + "get_recording" + ] + }, + "GetTranscript": { + "methods": [ + "get_transcript" + ] + }, + "GetTranscriptEntry": { + "methods": [ + "get_transcript_entry" + ] + }, + "ListConferenceRecords": { + "methods": [ + "list_conference_records" + ] + }, + "ListParticipantSessions": { + "methods": [ + "list_participant_sessions" + ] + }, + "ListParticipants": { + "methods": [ + "list_participants" + ] + }, + "ListRecordings": { + "methods": [ + "list_recordings" + ] + }, + "ListTranscriptEntries": { + "methods": [ + "list_transcript_entries" + ] + }, + "ListTranscripts": { + "methods": [ + "list_transcripts" + ] + } + } + }, + "rest": { + "libraryClient": "ConferenceRecordsServiceClient", + "rpcs": { + "GetConferenceRecord": { + "methods": [ + "get_conference_record" + ] + }, + "GetParticipant": { + "methods": [ + "get_participant" + ] + }, + "GetParticipantSession": { + "methods": [ + "get_participant_session" + ] + }, + "GetRecording": { + "methods": [ + "get_recording" + ] + }, + "GetTranscript": { + "methods": [ + "get_transcript" + ] + }, + "GetTranscriptEntry": { + "methods": [ + "get_transcript_entry" + ] + }, + "ListConferenceRecords": { + "methods": [ + "list_conference_records" + ] + }, + "ListParticipantSessions": { + "methods": [ + "list_participant_sessions" + ] + }, + "ListParticipants": { + "methods": [ + "list_participants" + ] + }, + "ListRecordings": { + "methods": [ + "list_recordings" + ] + }, + "ListTranscriptEntries": { + "methods": [ + "list_transcript_entries" + ] + }, + "ListTranscripts": { + "methods": [ + "list_transcripts" 
+ ] + } + } + } + } + }, + "SpacesService": { + "clients": { + "grpc": { + "libraryClient": "SpacesServiceClient", + "rpcs": { + "CreateSpace": { + "methods": [ + "create_space" + ] + }, + "EndActiveConference": { + "methods": [ + "end_active_conference" + ] + }, + "GetSpace": { + "methods": [ + "get_space" + ] + }, + "UpdateSpace": { + "methods": [ + "update_space" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SpacesServiceAsyncClient", + "rpcs": { + "CreateSpace": { + "methods": [ + "create_space" + ] + }, + "EndActiveConference": { + "methods": [ + "end_active_conference" + ] + }, + "GetSpace": { + "methods": [ + "get_space" + ] + }, + "UpdateSpace": { + "methods": [ + "update_space" + ] + } + } + }, + "rest": { + "libraryClient": "SpacesServiceClient", + "rpcs": { + "CreateSpace": { + "methods": [ + "create_space" + ] + }, + "EndActiveConference": { + "methods": [ + "end_active_conference" + ] + }, + "GetSpace": { + "methods": [ + "get_space" + ] + }, + "UpdateSpace": { + "methods": [ + "update_space" + ] + } + } + } + } + } + } +} diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py b/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py new file mode 100644 index 000000000000..360a0d13ebdd --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/py.typed b/packages/google-apps-meet/google/apps/meet_v2beta/py.typed new file mode 100644 index 000000000000..6bbef0e04410 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-apps-meet package uses inline types. diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/__init__.py new file mode 100644 index 000000000000..86d7a0ae95f1 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import ConferenceRecordsServiceAsyncClient +from .client import ConferenceRecordsServiceClient + +__all__ = ( + "ConferenceRecordsServiceClient", + "ConferenceRecordsServiceAsyncClient", +) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/async_client.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/async_client.py new file mode 100644 index 000000000000..e19d47b5a540 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/async_client.py @@ -0,0 +1,1696 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.apps.meet_v2beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + +from google.apps.meet_v2beta.services.conference_records_service import pagers +from google.apps.meet_v2beta.types import resource, service + +from .client import ConferenceRecordsServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, ConferenceRecordsServiceTransport +from .transports.grpc_asyncio import ConferenceRecordsServiceGrpcAsyncIOTransport + + +class ConferenceRecordsServiceAsyncClient: + """REST API for services dealing with conference records.""" + + _client: ConferenceRecordsServiceClient + + DEFAULT_ENDPOINT = ConferenceRecordsServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ConferenceRecordsServiceClient.DEFAULT_MTLS_ENDPOINT + + conference_record_path = staticmethod( + ConferenceRecordsServiceClient.conference_record_path + ) + parse_conference_record_path = staticmethod( + ConferenceRecordsServiceClient.parse_conference_record_path + ) + participant_path = staticmethod(ConferenceRecordsServiceClient.participant_path) + parse_participant_path = staticmethod( + ConferenceRecordsServiceClient.parse_participant_path + ) + participant_session_path = staticmethod( + ConferenceRecordsServiceClient.participant_session_path + ) + parse_participant_session_path = staticmethod( + ConferenceRecordsServiceClient.parse_participant_session_path + ) + recording_path = staticmethod(ConferenceRecordsServiceClient.recording_path) + parse_recording_path = staticmethod( + ConferenceRecordsServiceClient.parse_recording_path + ) + space_path = staticmethod(ConferenceRecordsServiceClient.space_path) + parse_space_path = staticmethod(ConferenceRecordsServiceClient.parse_space_path) + transcript_path = staticmethod(ConferenceRecordsServiceClient.transcript_path) + parse_transcript_path = staticmethod( + ConferenceRecordsServiceClient.parse_transcript_path + ) + transcript_entry_path = staticmethod( + ConferenceRecordsServiceClient.transcript_entry_path + ) + parse_transcript_entry_path = staticmethod( + ConferenceRecordsServiceClient.parse_transcript_entry_path + ) + common_billing_account_path = staticmethod( + ConferenceRecordsServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ConferenceRecordsServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(ConferenceRecordsServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + ConferenceRecordsServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + ConferenceRecordsServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + ConferenceRecordsServiceClient.parse_common_organization_path + ) + common_project_path = 
staticmethod( + ConferenceRecordsServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + ConferenceRecordsServiceClient.parse_common_project_path + ) + common_location_path = staticmethod( + ConferenceRecordsServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + ConferenceRecordsServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ConferenceRecordsServiceAsyncClient: The constructed client. + """ + return ConferenceRecordsServiceClient.from_service_account_info.__func__(ConferenceRecordsServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ConferenceRecordsServiceAsyncClient: The constructed client. + """ + return ConferenceRecordsServiceClient.from_service_account_file.__func__(ConferenceRecordsServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ConferenceRecordsServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ConferenceRecordsServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ConferenceRecordsServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(ConferenceRecordsServiceClient).get_transport_class, + type(ConferenceRecordsServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ConferenceRecordsServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the conference records service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ConferenceRecordsServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ConferenceRecordsServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def get_conference_record( + self, + request: Optional[Union[service.GetConferenceRecordRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.ConferenceRecord: + r"""`Developer + Preview `__. + Gets a conference record by conference ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_conference_record(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetConferenceRecordRequest( + name="name_value", + ) + + # Make the request + response = await client.get_conference_record(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetConferenceRecordRequest, dict]]): + The request object. Request to get a conference record. + name (:class:`str`): + Required. Resource name of the + conference. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.ConferenceRecord: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Single instance of a meeting held in a space. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetConferenceRecordRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_conference_record, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_conference_records( + self, + request: Optional[Union[service.ListConferenceRecordsRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListConferenceRecordsAsyncPager: + r"""`Developer + Preview `__. + Lists the conference records by start time and in descending + order. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_list_conference_records(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.ListConferenceRecordsRequest( + ) + + # Make the request + page_result = client.list_conference_records(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.ListConferenceRecordsRequest, dict]]): + The request object. Request to fetch list of conference + records per user. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListConferenceRecordsAsyncPager: + Response of ListConferenceRecords + method. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + request = service.ListConferenceRecordsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_conference_records, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListConferenceRecordsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_participant( + self, + request: Optional[Union[service.GetParticipantRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Participant: + r"""`Developer + Preview `__. + Gets a participant by participant ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_participant(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantRequest( + name="name_value", + ) + + # Make the request + response = await client.get_participant(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetParticipantRequest, dict]]): + The request object. Request to get a Participant. + name (:class:`str`): + Required. Resource name of the + participant. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Participant: + [Developer Preview](\ https://developers.google.com/workspace/preview). + User who attended or is attending a conference. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetParticipantRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_participant, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_participants( + self, + request: Optional[Union[service.ListParticipantsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListParticipantsAsyncPager: + r"""`Developer + Preview `__. + Lists the participants in a conference record, by default + ordered by join time and in descending order. This API supports + ``fields`` as standard parameters like every other API. However, + when the ``fields`` request parameter is omitted, this API + defaults to ``'participants/*, next_page_token'``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_list_participants(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.ListParticipantsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_participants(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.ListParticipantsRequest, dict]]): + The request object. Request to fetch list of participant + per conference. + parent (:class:`str`): + Required. Format: + ``conferenceRecords/{conference_record}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantsAsyncPager: + Response of ListParticipants method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListParticipantsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_participants, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListParticipantsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_participant_session( + self, + request: Optional[Union[service.GetParticipantSessionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.ParticipantSession: + r"""`Developer + Preview `__. + Gets a participant session by participant session ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_participant_session(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantSessionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_participant_session(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetParticipantSessionRequest, dict]]): + The request object. Request to get a participant session. + name (:class:`str`): + Required. Resource name of the + participant. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.ParticipantSession: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Refers to each unique join/leave session when a user + joins a conference from a device. Note that any time + a user joins the conference a new unique ID is + assigned. That means if a user joins a space multiple + times from the same device, they're assigned + different IDs, and are also be treated as different + participant sessions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetParticipantSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_participant_session, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_participant_sessions( + self, + request: Optional[Union[service.ListParticipantSessionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListParticipantSessionsAsyncPager: + r"""`Developer + Preview `__. + Lists the participant sessions of a participant in a conference + record, by default ordered by join time and in descending order. + This API supports ``fields`` as standard parameters like every + other API. However, when the ``fields`` request parameter is + omitted this API defaults to + ``'participantsessions/*, next_page_token'``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_list_participant_sessions(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.ListParticipantSessionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_participant_sessions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.ListParticipantSessionsRequest, dict]]): + The request object. Request to fetch list of participant + sessions per conference record per + participant. + parent (:class:`str`): + Required. Format: + ``conferenceRecords/{conference_record}/participants/{participant}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantSessionsAsyncPager: + Response of ListParticipants method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListParticipantSessionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_participant_sessions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListParticipantSessionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_recording( + self, + request: Optional[Union[service.GetRecordingRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Recording: + r"""`Developer + Preview `__. + Gets a recording by recording ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_recording(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetRecordingRequest( + name="name_value", + ) + + # Make the request + response = await client.get_recording(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetRecordingRequest, dict]]): + The request object. Request message for GetRecording + method. + name (:class:`str`): + Required. Resource name of the + recording. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Recording: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Metadata about a recording created during a + conference. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = service.GetRecordingRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_recording, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_recordings( + self, + request: Optional[Union[service.ListRecordingsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListRecordingsAsyncPager: + r"""`Developer + Preview `__. + Lists the recording resources from the conference record. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_list_recordings(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.ListRecordingsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_recordings(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.ListRecordingsRequest, dict]]): + The request object. Request for ListRecordings method. + parent (:class:`str`): + Required. Format: + ``conferenceRecords/{conference_record}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListRecordingsAsyncPager: + Response for ListRecordings method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListRecordingsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_recordings, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListRecordingsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_transcript( + self, + request: Optional[Union[service.GetTranscriptRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Transcript: + r"""`Developer + Preview `__. + Gets a transcript by transcript ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_transcript(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptRequest( + name="name_value", + ) + + # Make the request + response = await client.get_transcript(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetTranscriptRequest, dict]]): + The request object. Request for GetTranscript method. + name (:class:`str`): + Required. Resource name of the + transcript. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Transcript: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Metadata for a transcript generated from a + conference. 
It refers to the ASR (Automatic Speech + Recognition) result of user's speech during the + conference. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetTranscriptRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_transcript, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_transcripts( + self, + request: Optional[Union[service.ListTranscriptsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTranscriptsAsyncPager: + r"""`Developer + Preview `__. + Lists the set of transcripts from the conference record. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_list_transcripts(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.ListTranscriptsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_transcripts(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.ListTranscriptsRequest, dict]]): + The request object. Request for ListTranscripts method. + parent (:class:`str`): + Required. Format: + ``conferenceRecords/{conference_record}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptsAsyncPager: + Response for ListTranscripts method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListTranscriptsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_transcripts, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTranscriptsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_transcript_entry( + self, + request: Optional[Union[service.GetTranscriptEntryRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.TranscriptEntry: + r"""`Developer + Preview `__. + Gets a ``TranscriptEntry`` resource by entry ID. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_transcript_entry(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptEntryRequest( + name="name_value", + ) + + # Make the request + response = await client.get_transcript_entry(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetTranscriptEntryRequest, dict]]): + The request object. Request for GetTranscriptEntry + method. 
+ name (:class:`str`): + Required. Resource name of the ``TranscriptEntry``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.TranscriptEntry: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Single entry for one user’s speech during a + transcript session. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetTranscriptEntryRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_transcript_entry, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_transcript_entries( + self, + request: Optional[Union[service.ListTranscriptEntriesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTranscriptEntriesAsyncPager: + r"""`Developer + Preview `__. + Lists the structured transcript entries per transcript. By + default, ordered by start time and in ascending order. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_list_transcript_entries(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.ListTranscriptEntriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_transcript_entries(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.ListTranscriptEntriesRequest, dict]]): + The request object. Request for ListTranscriptEntries + method. + parent (:class:`str`): + Required. Format: + ``conferenceRecords/{conference_record}/transcripts/{transcript}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptEntriesAsyncPager: + Response for ListTranscriptEntries + method + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListTranscriptEntriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_transcript_entries, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTranscriptEntriesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
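Because ``list_transcripts`` and ``list_transcript_entries`` nest naturally (a transcript's ``name`` is the parent for its entries), the two pagers above can be walked together. A short sketch, with a placeholder conference record name and the ``TranscriptEntry.text`` field assumed from the resource schema:

.. code-block:: python

    import asyncio

    from google.apps import meet_v2beta


    async def dump_transcripts(conference_record: str) -> None:
        client = meet_v2beta.ConferenceRecordsServiceAsyncClient()

        # Transcripts are listed per conference record.
        transcripts = await client.list_transcripts(
            request=meet_v2beta.ListTranscriptsRequest(parent=conference_record)
        )

        async for transcript in transcripts:
            # Each transcript's resource name is the parent for its entries,
            # which are returned ordered by start time (ascending).
            entries = await client.list_transcript_entries(
                request=meet_v2beta.ListTranscriptEntriesRequest(
                    parent=transcript.name
                )
            )
            async for entry in entries:
                print(entry.text)  # `text` is assumed from the resource schema


    # Placeholder conference record resource name.
    asyncio.run(dump_transcripts("conferenceRecords/abc-123"))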
+ return response + + async def __aenter__(self) -> "ConferenceRecordsServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ConferenceRecordsServiceAsyncClient",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/client.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/client.py new file mode 100644 index 000000000000..cb4c9c4143e4 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/client.py @@ -0,0 +1,1906 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.apps.meet_v2beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + +from google.apps.meet_v2beta.services.conference_records_service import pagers +from google.apps.meet_v2beta.types import resource, service + +from .transports.base import DEFAULT_CLIENT_INFO, ConferenceRecordsServiceTransport +from .transports.grpc import ConferenceRecordsServiceGrpcTransport +from .transports.grpc_asyncio import ConferenceRecordsServiceGrpcAsyncIOTransport +from .transports.rest import ConferenceRecordsServiceRestTransport + + +class ConferenceRecordsServiceClientMeta(type): + """Metaclass for the ConferenceRecordsService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ConferenceRecordsServiceTransport]] + _transport_registry["grpc"] = ConferenceRecordsServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ConferenceRecordsServiceGrpcAsyncIOTransport + _transport_registry["rest"] = ConferenceRecordsServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[ConferenceRecordsServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ConferenceRecordsServiceClient(metaclass=ConferenceRecordsServiceClientMeta): + """REST API for services dealing with conference records.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "meet.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ConferenceRecordsServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ConferenceRecordsServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ConferenceRecordsServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ConferenceRecordsServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def conference_record_path( + conference_record: str, + ) -> str: + """Returns a fully-qualified conference_record string.""" + return "conferenceRecords/{conference_record}".format( + conference_record=conference_record, + ) + + @staticmethod + def parse_conference_record_path(path: str) -> Dict[str, str]: + """Parses a conference_record path into its component segments.""" + m = re.match(r"^conferenceRecords/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def participant_path( + conference_record: str, + participant: str, + ) -> str: + """Returns a fully-qualified participant string.""" + return ( + "conferenceRecords/{conference_record}/participants/{participant}".format( + conference_record=conference_record, + participant=participant, + ) + ) + + @staticmethod + def parse_participant_path(path: str) -> Dict[str, str]: + """Parses a participant path into its component segments.""" + m = re.match( + r"^conferenceRecords/(?P.+?)/participants/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def participant_session_path( + conference_record: str, + participant: str, + participant_session: str, + ) -> str: + """Returns a fully-qualified participant_session string.""" + return "conferenceRecords/{conference_record}/participants/{participant}/participantSessions/{participant_session}".format( + conference_record=conference_record, + participant=participant, + participant_session=participant_session, + ) + + @staticmethod + def parse_participant_session_path(path: str) -> Dict[str, str]: + """Parses a participant_session path into its component segments.""" + m = re.match( + r"^conferenceRecords/(?P.+?)/participants/(?P.+?)/participantSessions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def recording_path( + conference_record: str, + recording: str, + ) -> str: + """Returns a fully-qualified recording string.""" + return "conferenceRecords/{conference_record}/recordings/{recording}".format( + conference_record=conference_record, + recording=recording, + ) + + @staticmethod + def parse_recording_path(path: str) -> Dict[str, str]: + """Parses a recording path into its component segments.""" + m = re.match( + r"^conferenceRecords/(?P.+?)/recordings/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def space_path( + space: str, + ) -> str: + """Returns a fully-qualified space string.""" + return "spaces/{space}".format( + space=space, + ) + + @staticmethod + def parse_space_path(path: str) -> Dict[str, str]: + """Parses a space path into its component segments.""" + m = re.match(r"^spaces/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def transcript_path( + conference_record: str, + transcript: str, + ) -> str: + """Returns a fully-qualified transcript string.""" + return "conferenceRecords/{conference_record}/transcripts/{transcript}".format( + conference_record=conference_record, + transcript=transcript, + ) + + @staticmethod + def parse_transcript_path(path: str) -> Dict[str, str]: + """Parses a transcript path into its component segments.""" + m = re.match( + r"^conferenceRecords/(?P.+?)/transcripts/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def transcript_entry_path( + conference_record: str, + transcript: str, + entry: str, + ) -> str: + """Returns a fully-qualified transcript_entry string.""" + return 
"conferenceRecords/{conference_record}/transcripts/{transcript}/entries/{entry}".format( + conference_record=conference_record, + transcript=transcript, + entry=entry, + ) + + @staticmethod + def parse_transcript_entry_path(path: str) -> Dict[str, str]: + """Parses a transcript_entry path into its component segments.""" + m = re.match( + r"^conferenceRecords/(?P.+?)/transcripts/(?P.+?)/entries/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ConferenceRecordsServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the conference records service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ConferenceRecordsServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ConferenceRecordsServiceTransport): + # transport is a ConferenceRecordsServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def get_conference_record( + self, + request: Optional[Union[service.GetConferenceRecordRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.ConferenceRecord: + r"""`Developer + Preview `__. + Gets a conference record by conference ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_conference_record(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetConferenceRecordRequest( + name="name_value", + ) + + # Make the request + response = client.get_conference_record(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetConferenceRecordRequest, dict]): + The request object. Request to get a conference record. + name (str): + Required. Resource name of the + conference. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.ConferenceRecord: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Single instance of a meeting held in a space. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetConferenceRecordRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetConferenceRecordRequest): + request = service.GetConferenceRecordRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_conference_record] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_conference_records( + self, + request: Optional[Union[service.ListConferenceRecordsRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListConferenceRecordsPager: + r"""`Developer + Preview `__. + Lists the conference records by start time and in descending + order. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_list_conference_records(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListConferenceRecordsRequest( + ) + + # Make the request + page_result = client.list_conference_records(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.ListConferenceRecordsRequest, dict]): + The request object. Request to fetch list of conference + records per user. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListConferenceRecordsPager: + Response of ListConferenceRecords + method. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a service.ListConferenceRecordsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListConferenceRecordsRequest): + request = service.ListConferenceRecordsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_conference_records] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListConferenceRecordsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_participant( + self, + request: Optional[Union[service.GetParticipantRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Participant: + r"""`Developer + Preview `__. + Gets a participant by participant ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_participant(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantRequest( + name="name_value", + ) + + # Make the request + response = client.get_participant(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetParticipantRequest, dict]): + The request object. Request to get a Participant. + name (str): + Required. Resource name of the + participant. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Participant: + [Developer Preview](\ https://developers.google.com/workspace/preview). + User who attended or is attending a conference. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetParticipantRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetParticipantRequest): + request = service.GetParticipantRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_participant] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_participants( + self, + request: Optional[Union[service.ListParticipantsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListParticipantsPager: + r"""`Developer + Preview `__. + Lists the participants in a conference record, by default + ordered by join time and in descending order. This API supports + ``fields`` as standard parameters like every other API. However, + when the ``fields`` request parameter is omitted, this API + defaults to ``'participants/*, next_page_token'``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_list_participants(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListParticipantsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_participants(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.ListParticipantsRequest, dict]): + The request object. Request to fetch list of participant + per conference. + parent (str): + Required. Format: + ``conferenceRecords/{conference_record}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantsPager: + Response of ListParticipants method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListParticipantsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListParticipantsRequest): + request = service.ListParticipantsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_participants] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListParticipantsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
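+        # (No further requests are sent here; the pager above fetches
+        # subsequent pages lazily, only as the caller iterates over it.)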
+ return response + + def get_participant_session( + self, + request: Optional[Union[service.GetParticipantSessionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.ParticipantSession: + r"""`Developer + Preview `__. + Gets a participant session by participant session ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_participant_session(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantSessionRequest( + name="name_value", + ) + + # Make the request + response = client.get_participant_session(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetParticipantSessionRequest, dict]): + The request object. Request to get a participant session. + name (str): + Required. Resource name of the + participant. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.ParticipantSession: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Refers to each unique join/leave session when a user + joins a conference from a device. Note that any time + a user joins the conference a new unique ID is + assigned. That means if a user joins a space multiple + times from the same device, they're assigned + different IDs, and are also be treated as different + participant sessions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetParticipantSessionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetParticipantSessionRequest): + request = service.GetParticipantSessionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_participant_session] + + # Certain fields should be provided within the metadata header; + # add these here. 
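+        # (Descriptive note: the helper below typically yields a single
+        # ("x-goog-request-params", "name=<url-encoded resource name>") entry,
+        # which allows the service to route the request by resource.)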
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_participant_sessions( + self, + request: Optional[Union[service.ListParticipantSessionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListParticipantSessionsPager: + r"""`Developer + Preview `__. + Lists the participant sessions of a participant in a conference + record, by default ordered by join time and in descending order. + This API supports ``fields`` as standard parameters like every + other API. However, when the ``fields`` request parameter is + omitted this API defaults to + ``'participantsessions/*, next_page_token'``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_list_participant_sessions(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListParticipantSessionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_participant_sessions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.ListParticipantSessionsRequest, dict]): + The request object. Request to fetch list of participant + sessions per conference record per + participant. + parent (str): + Required. Format: + ``conferenceRecords/{conference_record}/participants/{participant}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantSessionsPager: + Response of ListParticipants method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListParticipantSessionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
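+        # (Descriptive note: a plain dict such as
+        # {"parent": "conferenceRecords/{conference_record}/participants/{participant}"}
+        # is also accepted and is copied into a new request message below;
+        # the placeholder values are illustrative.)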
+ if not isinstance(request, service.ListParticipantSessionsRequest): + request = service.ListParticipantSessionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_participant_sessions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListParticipantSessionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_recording( + self, + request: Optional[Union[service.GetRecordingRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Recording: + r"""`Developer + Preview `__. + Gets a recording by recording ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_recording(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetRecordingRequest( + name="name_value", + ) + + # Make the request + response = client.get_recording(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetRecordingRequest, dict]): + The request object. Request message for GetRecording + method. + name (str): + Required. Resource name of the + recording. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Recording: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Metadata about a recording created during a + conference. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetRecordingRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetRecordingRequest): + request = service.GetRecordingRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_recording] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_recordings( + self, + request: Optional[Union[service.ListRecordingsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListRecordingsPager: + r"""`Developer + Preview `__. + Lists the recording resources from the conference record. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_list_recordings(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListRecordingsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_recordings(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.ListRecordingsRequest, dict]): + The request object. Request for ListRecordings method. + parent (str): + Required. Format: + ``conferenceRecords/{conference_record}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListRecordingsPager: + Response for ListRecordings method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListRecordingsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListRecordingsRequest): + request = service.ListRecordingsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_recordings] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListRecordingsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_transcript( + self, + request: Optional[Union[service.GetTranscriptRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Transcript: + r"""`Developer + Preview `__. + Gets a transcript by transcript ID. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_transcript(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptRequest( + name="name_value", + ) + + # Make the request + response = client.get_transcript(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetTranscriptRequest, dict]): + The request object. Request for GetTranscript method. + name (str): + Required. Resource name of the + transcript. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Transcript: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Metadata for a transcript generated from a + conference. It refers to the ASR (Automatic Speech + Recognition) result of user's speech during the + conference. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
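+        # (In other words, callers pass either request=GetTranscriptRequest(...)
+        # or the flattened name=... argument, never both; mixing the two raises
+        # the ValueError below.)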
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetTranscriptRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetTranscriptRequest): + request = service.GetTranscriptRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_transcript] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_transcripts( + self, + request: Optional[Union[service.ListTranscriptsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTranscriptsPager: + r"""`Developer + Preview `__. + Lists the set of transcripts from the conference record. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_list_transcripts(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListTranscriptsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_transcripts(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.ListTranscriptsRequest, dict]): + The request object. Request for ListTranscripts method. + parent (str): + Required. Format: + ``conferenceRecords/{conference_record}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptsPager: + Response for ListTranscripts method. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
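+        # (`parent` is the only flattened field on this method, so the check
+        # below only triggers when a non-empty parent is passed alongside
+        # `request`.)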
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListTranscriptsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListTranscriptsRequest): + request = service.ListTranscriptsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_transcripts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTranscriptsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_transcript_entry( + self, + request: Optional[Union[service.GetTranscriptEntryRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.TranscriptEntry: + r"""`Developer + Preview `__. + Gets a ``TranscriptEntry`` resource by entry ID. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_transcript_entry(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptEntryRequest( + name="name_value", + ) + + # Make the request + response = client.get_transcript_entry(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetTranscriptEntryRequest, dict]): + The request object. Request for GetTranscriptEntry + method. + name (str): + Required. Resource name of the ``TranscriptEntry``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.apps.meet_v2beta.types.TranscriptEntry: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Single entry for one user’s speech during a + transcript session. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetTranscriptEntryRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetTranscriptEntryRequest): + request = service.GetTranscriptEntryRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_transcript_entry] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_transcript_entries( + self, + request: Optional[Union[service.ListTranscriptEntriesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTranscriptEntriesPager: + r"""`Developer + Preview `__. + Lists the structured transcript entries per transcript. By + default, ordered by start time and in ascending order. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_list_transcript_entries(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListTranscriptEntriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_transcript_entries(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.ListTranscriptEntriesRequest, dict]): + The request object. Request for ListTranscriptEntries + method. + parent (str): + Required. 
Format: + ``conferenceRecords/{conference_record}/transcripts/{transcript}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptEntriesPager: + Response for ListTranscriptEntries + method + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListTranscriptEntriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListTranscriptEntriesRequest): + request = service.ListTranscriptEntriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_transcript_entries] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTranscriptEntriesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "ConferenceRecordsServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ConferenceRecordsServiceClient",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/pagers.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/pagers.py new file mode 100644 index 000000000000..1e8be9a578bc --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/pagers.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, +) + +from google.apps.meet_v2beta.types import resource, service + + +class ListConferenceRecordsPager: + """A pager for iterating through ``list_conference_records`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListConferenceRecordsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``conference_records`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListConferenceRecords`` requests and continue to iterate + through the ``conference_records`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListConferenceRecordsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListConferenceRecordsResponse], + request: service.ListConferenceRecordsRequest, + response: service.ListConferenceRecordsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListConferenceRecordsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListConferenceRecordsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListConferenceRecordsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListConferenceRecordsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resource.ConferenceRecord]: + for page in self.pages: + yield from page.conference_records + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListConferenceRecordsAsyncPager: + """A pager for iterating through ``list_conference_records`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListConferenceRecordsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``conference_records`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListConferenceRecords`` requests and continue to iterate + through the ``conference_records`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListConferenceRecordsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListConferenceRecordsResponse]], + request: service.ListConferenceRecordsRequest, + response: service.ListConferenceRecordsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListConferenceRecordsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListConferenceRecordsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListConferenceRecordsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListConferenceRecordsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[resource.ConferenceRecord]: + async def async_generator(): + async for page in self.pages: + for response in page.conference_records: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListParticipantsPager: + """A pager for iterating through ``list_participants`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListParticipantsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``participants`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListParticipants`` requests and continue to iterate + through the ``participants`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListParticipantsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListParticipantsResponse], + request: service.ListParticipantsRequest, + response: service.ListParticipantsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListParticipantsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListParticipantsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListParticipantsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListParticipantsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resource.Participant]: + for page in self.pages: + yield from page.participants + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListParticipantsAsyncPager: + """A pager for iterating through ``list_participants`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListParticipantsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``participants`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListParticipants`` requests and continue to iterate + through the ``participants`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListParticipantsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListParticipantsResponse]], + request: service.ListParticipantsRequest, + response: service.ListParticipantsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListParticipantsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListParticipantsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListParticipantsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListParticipantsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[resource.Participant]: + async def async_generator(): + async for page in self.pages: + for response in page.participants: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListParticipantSessionsPager: + """A pager for iterating through ``list_participant_sessions`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListParticipantSessionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``participant_sessions`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListParticipantSessions`` requests and continue to iterate + through the ``participant_sessions`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListParticipantSessionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListParticipantSessionsResponse], + request: service.ListParticipantSessionsRequest, + response: service.ListParticipantSessionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListParticipantSessionsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListParticipantSessionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListParticipantSessionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListParticipantSessionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resource.ParticipantSession]: + for page in self.pages: + yield from page.participant_sessions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListParticipantSessionsAsyncPager: + """A pager for iterating through ``list_participant_sessions`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListParticipantSessionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``participant_sessions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListParticipantSessions`` requests and continue to iterate + through the ``participant_sessions`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListParticipantSessionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListParticipantSessionsResponse]], + request: service.ListParticipantSessionsRequest, + response: service.ListParticipantSessionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListParticipantSessionsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListParticipantSessionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListParticipantSessionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListParticipantSessionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[resource.ParticipantSession]: + async def async_generator(): + async for page in self.pages: + for response in page.participant_sessions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListRecordingsPager: + """A pager for iterating through ``list_recordings`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListRecordingsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``recordings`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListRecordings`` requests and continue to iterate + through the ``recordings`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListRecordingsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListRecordingsResponse], + request: service.ListRecordingsRequest, + response: service.ListRecordingsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListRecordingsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListRecordingsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListRecordingsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListRecordingsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resource.Recording]: + for page in self.pages: + yield from page.recordings + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListRecordingsAsyncPager: + """A pager for iterating through ``list_recordings`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListRecordingsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``recordings`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListRecordings`` requests and continue to iterate + through the ``recordings`` field on the + corresponding responses. 
+ + All the usual :class:`google.apps.meet_v2beta.types.ListRecordingsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListRecordingsResponse]], + request: service.ListRecordingsRequest, + response: service.ListRecordingsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListRecordingsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListRecordingsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListRecordingsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListRecordingsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[resource.Recording]: + async def async_generator(): + async for page in self.pages: + for response in page.recordings: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTranscriptsPager: + """A pager for iterating through ``list_transcripts`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListTranscriptsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``transcripts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTranscripts`` requests and continue to iterate + through the ``transcripts`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListTranscriptsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListTranscriptsResponse], + request: service.ListTranscriptsRequest, + response: service.ListTranscriptsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListTranscriptsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListTranscriptsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListTranscriptsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListTranscriptsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resource.Transcript]: + for page in self.pages: + yield from page.transcripts + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTranscriptsAsyncPager: + """A pager for iterating through ``list_transcripts`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListTranscriptsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``transcripts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTranscripts`` requests and continue to iterate + through the ``transcripts`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListTranscriptsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListTranscriptsResponse]], + request: service.ListTranscriptsRequest, + response: service.ListTranscriptsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListTranscriptsRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListTranscriptsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListTranscriptsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListTranscriptsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[resource.Transcript]: + async def async_generator(): + async for page in self.pages: + for response in page.transcripts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTranscriptEntriesPager: + """A pager for iterating through ``list_transcript_entries`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListTranscriptEntriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``transcript_entries`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTranscriptEntries`` requests and continue to iterate + through the ``transcript_entries`` field on the + corresponding responses. 
+ + All the usual :class:`google.apps.meet_v2beta.types.ListTranscriptEntriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListTranscriptEntriesResponse], + request: service.ListTranscriptEntriesRequest, + response: service.ListTranscriptEntriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListTranscriptEntriesRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListTranscriptEntriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListTranscriptEntriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListTranscriptEntriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resource.TranscriptEntry]: + for page in self.pages: + yield from page.transcript_entries + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTranscriptEntriesAsyncPager: + """A pager for iterating through ``list_transcript_entries`` requests. + + This class thinly wraps an initial + :class:`google.apps.meet_v2beta.types.ListTranscriptEntriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``transcript_entries`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTranscriptEntries`` requests and continue to iterate + through the ``transcript_entries`` field on the + corresponding responses. + + All the usual :class:`google.apps.meet_v2beta.types.ListTranscriptEntriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListTranscriptEntriesResponse]], + request: service.ListTranscriptEntriesRequest, + response: service.ListTranscriptEntriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.apps.meet_v2beta.types.ListTranscriptEntriesRequest): + The initial request object. + response (google.apps.meet_v2beta.types.ListTranscriptEntriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
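
When page boundaries matter, the ``pages`` property yields each raw response instead of flattening items. An illustrative sketch with a hypothetical transcript name; the request is passed in dict form because ``page_size`` is not a flattened keyword argument::

    from google.apps.meet_v2beta import ConferenceRecordsServiceClient

    client = ConferenceRecordsServiceClient()

    # Each iteration surfaces a ListTranscriptEntriesResponse, which is
    # useful when the per-request page size or token must be inspected.
    pager = client.list_transcript_entries(
        request={
            "parent": "conferenceRecords/EXAMPLE_RECORD/transcripts/EXAMPLE_TRANSCRIPT",
            "page_size": 50,
        }
    )
    for page in pager.pages:
        print(len(page.transcript_entries), repr(page.next_page_token))
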
+ """ + self._method = method + self._request = service.ListTranscriptEntriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListTranscriptEntriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[resource.TranscriptEntry]: + async def async_generator(): + async for page in self.pages: + for response in page.transcript_entries: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/__init__.py new file mode 100644 index 000000000000..f81d2b6fd59b --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/__init__.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ConferenceRecordsServiceTransport +from .grpc import ConferenceRecordsServiceGrpcTransport +from .grpc_asyncio import ConferenceRecordsServiceGrpcAsyncIOTransport +from .rest import ( + ConferenceRecordsServiceRestInterceptor, + ConferenceRecordsServiceRestTransport, +) + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[ConferenceRecordsServiceTransport]] +_transport_registry["grpc"] = ConferenceRecordsServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ConferenceRecordsServiceGrpcAsyncIOTransport +_transport_registry["rest"] = ConferenceRecordsServiceRestTransport + +__all__ = ( + "ConferenceRecordsServiceTransport", + "ConferenceRecordsServiceGrpcTransport", + "ConferenceRecordsServiceGrpcAsyncIOTransport", + "ConferenceRecordsServiceRestTransport", + "ConferenceRecordsServiceRestInterceptor", +) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/base.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/base.py new file mode 100644 index 000000000000..7bc4cbef4d69 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/base.py @@ -0,0 +1,433 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
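
The registry above is what lets callers select a transport by name when constructing the client. A small sketch, assuming the standard generated client surface (``transport=`` string argument and a ``transport`` property)::

    from google.apps.meet_v2beta import ConferenceRecordsServiceClient

    # The string is resolved against a registry like the one compiled above;
    # "rest" switches from gRPC to the HTTP/JSON transport.
    grpc_client = ConferenceRecordsServiceClient(transport="grpc")
    rest_client = ConferenceRecordsServiceClient(transport="rest")

    print(type(grpc_client.transport).__name__)  # ConferenceRecordsServiceGrpcTransport
    print(type(rest_client.transport).__name__)  # ConferenceRecordsServiceRestTransport
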
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.apps.meet_v2beta import gapic_version as package_version +from google.apps.meet_v2beta.types import resource, service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class ConferenceRecordsServiceTransport(abc.ABC): + """Abstract transport class for ConferenceRecordsService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "meet.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. 
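
From the caller's side, the credential resolution above boils down to three paths: pass a credentials object explicitly, point at a credentials file, or let ``google.auth.default()`` supply Application Default Credentials. A hedged sketch with a placeholder key file path::

    from google.oauth2 import service_account

    from google.apps.meet_v2beta import ConferenceRecordsServiceClient

    # Explicit credentials short-circuit the lookup: neither the
    # credentials-file branch nor google.auth.default() is consulted.
    creds = service_account.Credentials.from_service_account_file(
        "service-account.json"  # placeholder path, not part of the patch
    )
    explicit_client = ConferenceRecordsServiceClient(credentials=creds)

    # With no credentials supplied at all, the transport falls back to
    # google.auth.default(), i.e. Application Default Credentials.
    adc_client = ConferenceRecordsServiceClient()
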
+ if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.get_conference_record: gapic_v1.method.wrap_method( + self.get_conference_record, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_conference_records: gapic_v1.method.wrap_method( + self.list_conference_records, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_participant: gapic_v1.method.wrap_method( + self.get_participant, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_participants: gapic_v1.method.wrap_method( + self.list_participants, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_participant_session: gapic_v1.method.wrap_method( + self.get_participant_session, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_participant_sessions: gapic_v1.method.wrap_method( + self.list_participant_sessions, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_recording: gapic_v1.method.wrap_method( + self.get_recording, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_recordings: gapic_v1.method.wrap_method( + self.list_recordings, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_transcript: gapic_v1.method.wrap_method( + self.get_transcript, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + 
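
The defaults above retry only ``ServiceUnavailable`` with exponential backoff and a 60 s timeout; a caller can override them per call using the same ``google.api_core`` primitives. An illustrative sketch with a placeholder resource name::

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries

    from google.apps.meet_v2beta import ConferenceRecordsServiceClient

    client = ConferenceRecordsServiceClient()

    # A per-call policy in the same shape as the defaults: 1 s initial delay,
    # 1.3x multiplier, 10 s cap, retrying only UNAVAILABLE, 60 s overall.
    custom_retry = retries.Retry(
        initial=1.0,
        maximum=10.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
        deadline=60.0,
    )

    record = client.get_conference_record(
        name="conferenceRecords/EXAMPLE_RECORD",  # placeholder resource name
        retry=custom_retry,
        timeout=60.0,
    )
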
predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_transcripts: gapic_v1.method.wrap_method( + self.list_transcripts, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_transcript_entry: gapic_v1.method.wrap_method( + self.get_transcript_entry, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_transcript_entries: gapic_v1.method.wrap_method( + self.list_transcript_entries, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def get_conference_record( + self, + ) -> Callable[ + [service.GetConferenceRecordRequest], + Union[resource.ConferenceRecord, Awaitable[resource.ConferenceRecord]], + ]: + raise NotImplementedError() + + @property + def list_conference_records( + self, + ) -> Callable[ + [service.ListConferenceRecordsRequest], + Union[ + service.ListConferenceRecordsResponse, + Awaitable[service.ListConferenceRecordsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_participant( + self, + ) -> Callable[ + [service.GetParticipantRequest], + Union[resource.Participant, Awaitable[resource.Participant]], + ]: + raise NotImplementedError() + + @property + def list_participants( + self, + ) -> Callable[ + [service.ListParticipantsRequest], + Union[ + service.ListParticipantsResponse, + Awaitable[service.ListParticipantsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_participant_session( + self, + ) -> Callable[ + [service.GetParticipantSessionRequest], + Union[resource.ParticipantSession, Awaitable[resource.ParticipantSession]], + ]: + raise NotImplementedError() + + @property + def list_participant_sessions( + self, + ) -> Callable[ + [service.ListParticipantSessionsRequest], + Union[ + service.ListParticipantSessionsResponse, + Awaitable[service.ListParticipantSessionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_recording( + self, + ) -> Callable[ + [service.GetRecordingRequest], + Union[resource.Recording, Awaitable[resource.Recording]], + ]: + raise NotImplementedError() + + @property + def list_recordings( + self, + ) -> Callable[ + [service.ListRecordingsRequest], + Union[ + service.ListRecordingsResponse, Awaitable[service.ListRecordingsResponse] + ], + ]: + raise NotImplementedError() + + @property + def get_transcript( + self, + ) -> Callable[ + [service.GetTranscriptRequest], + Union[resource.Transcript, Awaitable[resource.Transcript]], + ]: + raise NotImplementedError() + + @property + def list_transcripts( + self, + ) -> Callable[ + [service.ListTranscriptsRequest], + Union[ + service.ListTranscriptsResponse, 
Awaitable[service.ListTranscriptsResponse] + ], + ]: + raise NotImplementedError() + + @property + def get_transcript_entry( + self, + ) -> Callable[ + [service.GetTranscriptEntryRequest], + Union[resource.TranscriptEntry, Awaitable[resource.TranscriptEntry]], + ]: + raise NotImplementedError() + + @property + def list_transcript_entries( + self, + ) -> Callable[ + [service.ListTranscriptEntriesRequest], + Union[ + service.ListTranscriptEntriesResponse, + Awaitable[service.ListTranscriptEntriesResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("ConferenceRecordsServiceTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc.py new file mode 100644 index 000000000000..e5c4f7fc1d45 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc.py @@ -0,0 +1,603 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import grpc # type: ignore + +from google.apps.meet_v2beta.types import resource, service + +from .base import DEFAULT_CLIENT_INFO, ConferenceRecordsServiceTransport + + +class ConferenceRecordsServiceGrpcTransport(ConferenceRecordsServiceTransport): + """gRPC backend transport for ConferenceRecordsService. + + REST API for services dealing with conference records. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
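
A hedged sketch of the non-deprecated mutual-TLS path via ``client_cert_source_for_mtls``; the certificate file names are placeholders, and the ``meet.mtls.googleapis.com`` host is assumed from the usual mTLS endpoint naming rather than taken from this patch::

    from typing import Tuple

    from google.apps.meet_v2beta import ConferenceRecordsServiceClient
    from google.apps.meet_v2beta.services.conference_records_service.transports import (
        ConferenceRecordsServiceGrpcTransport,
    )


    def load_client_cert() -> Tuple[bytes, bytes]:
        # Placeholder file names; both values must be PEM-encoded bytes.
        with open("client-cert.pem", "rb") as cert, open("client-key.pem", "rb") as key:
            return cert.read(), key.read()


    # client_cert_source_for_mtls configures a mutual TLS channel; it is
    # ignored if a channel or explicit ssl_channel_credentials is supplied.
    transport = ConferenceRecordsServiceGrpcTransport(
        host="meet.mtls.googleapis.com",  # assumed mTLS endpoint, see lead-in
        client_cert_source_for_mtls=load_client_cert,
    )
    client = ConferenceRecordsServiceClient(transport=transport)
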
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def get_conference_record( + self, + ) -> Callable[[service.GetConferenceRecordRequest], resource.ConferenceRecord]: + r"""Return a callable for the get conference record method over gRPC. + + `Developer + Preview `__. + Gets a conference record by conference ID. 
+ + Returns: + Callable[[~.GetConferenceRecordRequest], + ~.ConferenceRecord]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_conference_record" not in self._stubs: + self._stubs["get_conference_record"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetConferenceRecord", + request_serializer=service.GetConferenceRecordRequest.serialize, + response_deserializer=resource.ConferenceRecord.deserialize, + ) + return self._stubs["get_conference_record"] + + @property + def list_conference_records( + self, + ) -> Callable[ + [service.ListConferenceRecordsRequest], service.ListConferenceRecordsResponse + ]: + r"""Return a callable for the list conference records method over gRPC. + + `Developer + Preview `__. + Lists the conference records by start time and in descending + order. + + Returns: + Callable[[~.ListConferenceRecordsRequest], + ~.ListConferenceRecordsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_conference_records" not in self._stubs: + self._stubs["list_conference_records"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListConferenceRecords", + request_serializer=service.ListConferenceRecordsRequest.serialize, + response_deserializer=service.ListConferenceRecordsResponse.deserialize, + ) + return self._stubs["list_conference_records"] + + @property + def get_participant( + self, + ) -> Callable[[service.GetParticipantRequest], resource.Participant]: + r"""Return a callable for the get participant method over gRPC. + + `Developer + Preview `__. + Gets a participant by participant ID. + + Returns: + Callable[[~.GetParticipantRequest], + ~.Participant]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_participant" not in self._stubs: + self._stubs["get_participant"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetParticipant", + request_serializer=service.GetParticipantRequest.serialize, + response_deserializer=resource.Participant.deserialize, + ) + return self._stubs["get_participant"] + + @property + def list_participants( + self, + ) -> Callable[[service.ListParticipantsRequest], service.ListParticipantsResponse]: + r"""Return a callable for the list participants method over gRPC. + + `Developer + Preview `__. + Lists the participants in a conference record, by default + ordered by join time and in descending order. This API supports + ``fields`` as standard parameters like every other API. However, + when the ``fields`` request parameter is omitted, this API + defaults to ``'participants/*, next_page_token'``. + + Returns: + Callable[[~.ListParticipantsRequest], + ~.ListParticipantsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_participants" not in self._stubs: + self._stubs["list_participants"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListParticipants", + request_serializer=service.ListParticipantsRequest.serialize, + response_deserializer=service.ListParticipantsResponse.deserialize, + ) + return self._stubs["list_participants"] + + @property + def get_participant_session( + self, + ) -> Callable[[service.GetParticipantSessionRequest], resource.ParticipantSession]: + r"""Return a callable for the get participant session method over gRPC. + + `Developer + Preview `__. + Gets a participant session by participant session ID. + + Returns: + Callable[[~.GetParticipantSessionRequest], + ~.ParticipantSession]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_participant_session" not in self._stubs: + self._stubs["get_participant_session"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetParticipantSession", + request_serializer=service.GetParticipantSessionRequest.serialize, + response_deserializer=resource.ParticipantSession.deserialize, + ) + return self._stubs["get_participant_session"] + + @property + def list_participant_sessions( + self, + ) -> Callable[ + [service.ListParticipantSessionsRequest], + service.ListParticipantSessionsResponse, + ]: + r"""Return a callable for the list participant sessions method over gRPC. + + `Developer + Preview `__. + Lists the participant sessions of a participant in a conference + record, by default ordered by join time and in descending order. + This API supports ``fields`` as standard parameters like every + other API. However, when the ``fields`` request parameter is + omitted this API defaults to + ``'participantsessions/*, next_page_token'``. + + Returns: + Callable[[~.ListParticipantSessionsRequest], + ~.ListParticipantSessionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_participant_sessions" not in self._stubs: + self._stubs["list_participant_sessions"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListParticipantSessions", + request_serializer=service.ListParticipantSessionsRequest.serialize, + response_deserializer=service.ListParticipantSessionsResponse.deserialize, + ) + return self._stubs["list_participant_sessions"] + + @property + def get_recording( + self, + ) -> Callable[[service.GetRecordingRequest], resource.Recording]: + r"""Return a callable for the get recording method over gRPC. + + `Developer + Preview `__. + Gets a recording by recording ID. + + Returns: + Callable[[~.GetRecordingRequest], + ~.Recording]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_recording" not in self._stubs: + self._stubs["get_recording"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetRecording", + request_serializer=service.GetRecordingRequest.serialize, + response_deserializer=resource.Recording.deserialize, + ) + return self._stubs["get_recording"] + + @property + def list_recordings( + self, + ) -> Callable[[service.ListRecordingsRequest], service.ListRecordingsResponse]: + r"""Return a callable for the list recordings method over gRPC. + + `Developer + Preview `__. + Lists the recording resources from the conference record. + + Returns: + Callable[[~.ListRecordingsRequest], + ~.ListRecordingsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_recordings" not in self._stubs: + self._stubs["list_recordings"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListRecordings", + request_serializer=service.ListRecordingsRequest.serialize, + response_deserializer=service.ListRecordingsResponse.deserialize, + ) + return self._stubs["list_recordings"] + + @property + def get_transcript( + self, + ) -> Callable[[service.GetTranscriptRequest], resource.Transcript]: + r"""Return a callable for the get transcript method over gRPC. + + `Developer + Preview `__. + Gets a transcript by transcript ID. + + Returns: + Callable[[~.GetTranscriptRequest], + ~.Transcript]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_transcript" not in self._stubs: + self._stubs["get_transcript"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetTranscript", + request_serializer=service.GetTranscriptRequest.serialize, + response_deserializer=resource.Transcript.deserialize, + ) + return self._stubs["get_transcript"] + + @property + def list_transcripts( + self, + ) -> Callable[[service.ListTranscriptsRequest], service.ListTranscriptsResponse]: + r"""Return a callable for the list transcripts method over gRPC. + + `Developer + Preview `__. + Lists the set of transcripts from the conference record. + + Returns: + Callable[[~.ListTranscriptsRequest], + ~.ListTranscriptsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_transcripts" not in self._stubs: + self._stubs["list_transcripts"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListTranscripts", + request_serializer=service.ListTranscriptsRequest.serialize, + response_deserializer=service.ListTranscriptsResponse.deserialize, + ) + return self._stubs["list_transcripts"] + + @property + def get_transcript_entry( + self, + ) -> Callable[[service.GetTranscriptEntryRequest], resource.TranscriptEntry]: + r"""Return a callable for the get transcript entry method over gRPC. + + `Developer + Preview `__. + Gets a ``TranscriptEntry`` resource by entry ID. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + Returns: + Callable[[~.GetTranscriptEntryRequest], + ~.TranscriptEntry]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_transcript_entry" not in self._stubs: + self._stubs["get_transcript_entry"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetTranscriptEntry", + request_serializer=service.GetTranscriptEntryRequest.serialize, + response_deserializer=resource.TranscriptEntry.deserialize, + ) + return self._stubs["get_transcript_entry"] + + @property + def list_transcript_entries( + self, + ) -> Callable[ + [service.ListTranscriptEntriesRequest], service.ListTranscriptEntriesResponse + ]: + r"""Return a callable for the list transcript entries method over gRPC. + + `Developer + Preview `__. + Lists the structured transcript entries per transcript. By + default, ordered by start time and in ascending order. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + Returns: + Callable[[~.ListTranscriptEntriesRequest], + ~.ListTranscriptEntriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_transcript_entries" not in self._stubs: + self._stubs["list_transcript_entries"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListTranscriptEntries", + request_serializer=service.ListTranscriptEntriesRequest.serialize, + response_deserializer=service.ListTranscriptEntriesResponse.deserialize, + ) + return self._stubs["list_transcript_entries"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("ConferenceRecordsServiceGrpcTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc_asyncio.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..c17c8c9c65ce --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/grpc_asyncio.py @@ -0,0 +1,616 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.apps.meet_v2beta.types import resource, service + +from .base import DEFAULT_CLIENT_INFO, ConferenceRecordsServiceTransport +from .grpc import ConferenceRecordsServiceGrpcTransport + + +class ConferenceRecordsServiceGrpcAsyncIOTransport(ConferenceRecordsServiceTransport): + """gRPC AsyncIO backend transport for ConferenceRecordsService. + + REST API for services dealing with conference records. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def get_conference_record( + self, + ) -> Callable[ + [service.GetConferenceRecordRequest], Awaitable[resource.ConferenceRecord] + ]: + r"""Return a callable for the get conference record method over gRPC. + + `Developer + Preview `__. + Gets a conference record by conference ID. + + Returns: + Callable[[~.GetConferenceRecordRequest], + Awaitable[~.ConferenceRecord]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_conference_record" not in self._stubs: + self._stubs["get_conference_record"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetConferenceRecord", + request_serializer=service.GetConferenceRecordRequest.serialize, + response_deserializer=resource.ConferenceRecord.deserialize, + ) + return self._stubs["get_conference_record"] + + @property + def list_conference_records( + self, + ) -> Callable[ + [service.ListConferenceRecordsRequest], + Awaitable[service.ListConferenceRecordsResponse], + ]: + r"""Return a callable for the list conference records method over gRPC. + + `Developer + Preview `__. + Lists the conference records by start time and in descending + order. + + Returns: + Callable[[~.ListConferenceRecordsRequest], + Awaitable[~.ListConferenceRecordsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_conference_records" not in self._stubs: + self._stubs["list_conference_records"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListConferenceRecords", + request_serializer=service.ListConferenceRecordsRequest.serialize, + response_deserializer=service.ListConferenceRecordsResponse.deserialize, + ) + return self._stubs["list_conference_records"] + + @property + def get_participant( + self, + ) -> Callable[[service.GetParticipantRequest], Awaitable[resource.Participant]]: + r"""Return a callable for the get participant method over gRPC. + + `Developer + Preview `__. + Gets a participant by participant ID. + + Returns: + Callable[[~.GetParticipantRequest], + Awaitable[~.Participant]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_participant" not in self._stubs: + self._stubs["get_participant"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetParticipant", + request_serializer=service.GetParticipantRequest.serialize, + response_deserializer=resource.Participant.deserialize, + ) + return self._stubs["get_participant"] + + @property + def list_participants( + self, + ) -> Callable[ + [service.ListParticipantsRequest], Awaitable[service.ListParticipantsResponse] + ]: + r"""Return a callable for the list participants method over gRPC. + + `Developer + Preview `__. + Lists the participants in a conference record, by default + ordered by join time and in descending order. This API supports + ``fields`` as standard parameters like every other API. However, + when the ``fields`` request parameter is omitted, this API + defaults to ``'participants/*, next_page_token'``. + + Returns: + Callable[[~.ListParticipantsRequest], + Awaitable[~.ListParticipantsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
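
Seen from the caller, these awaitable stubs surface as coroutine methods on the async client. A short sketch, assuming the generated ``ConferenceRecordsServiceAsyncClient`` export and a placeholder resource name::

    import asyncio

    from google.apps.meet_v2beta import ConferenceRecordsServiceAsyncClient


    async def main() -> None:
        # The async client rides on the grpc_asyncio transport, so each RPC
        # returns an awaitable instead of a blocking result.
        client = ConferenceRecordsServiceAsyncClient()
        record = await client.get_conference_record(
            name="conferenceRecords/EXAMPLE_RECORD"  # placeholder resource name
        )
        print(record.name)


    asyncio.run(main())
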
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_participants" not in self._stubs: + self._stubs["list_participants"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListParticipants", + request_serializer=service.ListParticipantsRequest.serialize, + response_deserializer=service.ListParticipantsResponse.deserialize, + ) + return self._stubs["list_participants"] + + @property + def get_participant_session( + self, + ) -> Callable[ + [service.GetParticipantSessionRequest], Awaitable[resource.ParticipantSession] + ]: + r"""Return a callable for the get participant session method over gRPC. + + `Developer + Preview `__. + Gets a participant session by participant session ID. + + Returns: + Callable[[~.GetParticipantSessionRequest], + Awaitable[~.ParticipantSession]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_participant_session" not in self._stubs: + self._stubs["get_participant_session"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetParticipantSession", + request_serializer=service.GetParticipantSessionRequest.serialize, + response_deserializer=resource.ParticipantSession.deserialize, + ) + return self._stubs["get_participant_session"] + + @property + def list_participant_sessions( + self, + ) -> Callable[ + [service.ListParticipantSessionsRequest], + Awaitable[service.ListParticipantSessionsResponse], + ]: + r"""Return a callable for the list participant sessions method over gRPC. + + `Developer + Preview `__. + Lists the participant sessions of a participant in a conference + record, by default ordered by join time and in descending order. + This API supports ``fields`` as standard parameters like every + other API. However, when the ``fields`` request parameter is + omitted this API defaults to + ``'participantsessions/*, next_page_token'``. + + Returns: + Callable[[~.ListParticipantSessionsRequest], + Awaitable[~.ListParticipantSessionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_participant_sessions" not in self._stubs: + self._stubs["list_participant_sessions"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListParticipantSessions", + request_serializer=service.ListParticipantSessionsRequest.serialize, + response_deserializer=service.ListParticipantSessionsResponse.deserialize, + ) + return self._stubs["list_participant_sessions"] + + @property + def get_recording( + self, + ) -> Callable[[service.GetRecordingRequest], Awaitable[resource.Recording]]: + r"""Return a callable for the get recording method over gRPC. + + `Developer + Preview `__. + Gets a recording by recording ID. + + Returns: + Callable[[~.GetRecordingRequest], + Awaitable[~.Recording]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_recording" not in self._stubs: + self._stubs["get_recording"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetRecording", + request_serializer=service.GetRecordingRequest.serialize, + response_deserializer=resource.Recording.deserialize, + ) + return self._stubs["get_recording"] + + @property + def list_recordings( + self, + ) -> Callable[ + [service.ListRecordingsRequest], Awaitable[service.ListRecordingsResponse] + ]: + r"""Return a callable for the list recordings method over gRPC. + + `Developer + Preview `__. + Lists the recording resources from the conference record. + + Returns: + Callable[[~.ListRecordingsRequest], + Awaitable[~.ListRecordingsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_recordings" not in self._stubs: + self._stubs["list_recordings"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListRecordings", + request_serializer=service.ListRecordingsRequest.serialize, + response_deserializer=service.ListRecordingsResponse.deserialize, + ) + return self._stubs["list_recordings"] + + @property + def get_transcript( + self, + ) -> Callable[[service.GetTranscriptRequest], Awaitable[resource.Transcript]]: + r"""Return a callable for the get transcript method over gRPC. + + `Developer + Preview `__. + Gets a transcript by transcript ID. + + Returns: + Callable[[~.GetTranscriptRequest], + Awaitable[~.Transcript]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_transcript" not in self._stubs: + self._stubs["get_transcript"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetTranscript", + request_serializer=service.GetTranscriptRequest.serialize, + response_deserializer=resource.Transcript.deserialize, + ) + return self._stubs["get_transcript"] + + @property + def list_transcripts( + self, + ) -> Callable[ + [service.ListTranscriptsRequest], Awaitable[service.ListTranscriptsResponse] + ]: + r"""Return a callable for the list transcripts method over gRPC. + + `Developer + Preview `__. + Lists the set of transcripts from the conference record. + + Returns: + Callable[[~.ListTranscriptsRequest], + Awaitable[~.ListTranscriptsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_transcripts" not in self._stubs: + self._stubs["list_transcripts"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListTranscripts", + request_serializer=service.ListTranscriptsRequest.serialize, + response_deserializer=service.ListTranscriptsResponse.deserialize, + ) + return self._stubs["list_transcripts"] + + @property + def get_transcript_entry( + self, + ) -> Callable[ + [service.GetTranscriptEntryRequest], Awaitable[resource.TranscriptEntry] + ]: + r"""Return a callable for the get transcript entry method over gRPC. + + `Developer + Preview `__. + Gets a ``TranscriptEntry`` resource by entry ID. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + Returns: + Callable[[~.GetTranscriptEntryRequest], + Awaitable[~.TranscriptEntry]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_transcript_entry" not in self._stubs: + self._stubs["get_transcript_entry"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/GetTranscriptEntry", + request_serializer=service.GetTranscriptEntryRequest.serialize, + response_deserializer=resource.TranscriptEntry.deserialize, + ) + return self._stubs["get_transcript_entry"] + + @property + def list_transcript_entries( + self, + ) -> Callable[ + [service.ListTranscriptEntriesRequest], + Awaitable[service.ListTranscriptEntriesResponse], + ]: + r"""Return a callable for the list transcript entries method over gRPC. + + `Developer + Preview `__. + Lists the structured transcript entries per transcript. By + default, ordered by start time and in ascending order. + + Note: The transcript entries returned by the Google Meet API + might not match the transcription found in the Google Docs + transcript file. This can occur when the Google Docs transcript + file is modified after generation. + + Returns: + Callable[[~.ListTranscriptEntriesRequest], + Awaitable[~.ListTranscriptEntriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_transcript_entries" not in self._stubs: + self._stubs["list_transcript_entries"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.ConferenceRecordsService/ListTranscriptEntries", + request_serializer=service.ListTranscriptEntriesRequest.serialize, + response_deserializer=service.ListTranscriptEntriesResponse.deserialize, + ) + return self._stubs["list_transcript_entries"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("ConferenceRecordsServiceGrpcAsyncIOTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/rest.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/rest.py new file mode 100644 index 000000000000..c4a66094ad03 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/conference_records_service/transports/rest.py @@ -0,0 +1,1718 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.apps.meet_v2beta.types import resource, service + +from .base import ConferenceRecordsServiceTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ConferenceRecordsServiceRestInterceptor: + """Interceptor for ConferenceRecordsService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ConferenceRecordsServiceRestTransport. + + .. 
code-block:: python + class MyCustomConferenceRecordsServiceInterceptor(ConferenceRecordsServiceRestInterceptor): + def pre_get_conference_record(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_conference_record(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_participant(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_participant(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_participant_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_participant_session(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_recording(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_recording(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_transcript(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_transcript(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_transcript_entry(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_transcript_entry(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_conference_records(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_conference_records(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_participants(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_participants(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_participant_sessions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_participant_sessions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_recordings(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_recordings(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_transcript_entries(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_transcript_entries(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_transcripts(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_transcripts(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ConferenceRecordsServiceRestTransport(interceptor=MyCustomConferenceRecordsServiceInterceptor()) + client = ConferenceRecordsServiceClient(transport=transport) + + + """ + + def pre_get_conference_record( + self, + request: service.GetConferenceRecordRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.GetConferenceRecordRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_conference_record + + Override in a subclass to manipulate 
the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_get_conference_record( + self, response: resource.ConferenceRecord + ) -> resource.ConferenceRecord: + """Post-rpc interceptor for get_conference_record + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_get_participant( + self, + request: service.GetParticipantRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.GetParticipantRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_participant + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_get_participant( + self, response: resource.Participant + ) -> resource.Participant: + """Post-rpc interceptor for get_participant + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_get_participant_session( + self, + request: service.GetParticipantSessionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.GetParticipantSessionRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_participant_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_get_participant_session( + self, response: resource.ParticipantSession + ) -> resource.ParticipantSession: + """Post-rpc interceptor for get_participant_session + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_get_recording( + self, request: service.GetRecordingRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[service.GetRecordingRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_recording + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_get_recording(self, response: resource.Recording) -> resource.Recording: + """Post-rpc interceptor for get_recording + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_get_transcript( + self, request: service.GetTranscriptRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[service.GetTranscriptRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_transcript + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_get_transcript(self, response: resource.Transcript) -> resource.Transcript: + """Post-rpc interceptor for get_transcript + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_transcript_entry( + self, + request: service.GetTranscriptEntryRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.GetTranscriptEntryRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_transcript_entry + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_get_transcript_entry( + self, response: resource.TranscriptEntry + ) -> resource.TranscriptEntry: + """Post-rpc interceptor for get_transcript_entry + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_list_conference_records( + self, + request: service.ListConferenceRecordsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.ListConferenceRecordsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_conference_records + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_list_conference_records( + self, response: service.ListConferenceRecordsResponse + ) -> service.ListConferenceRecordsResponse: + """Post-rpc interceptor for list_conference_records + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_list_participants( + self, + request: service.ListParticipantsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.ListParticipantsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_participants + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_list_participants( + self, response: service.ListParticipantsResponse + ) -> service.ListParticipantsResponse: + """Post-rpc interceptor for list_participants + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_list_participant_sessions( + self, + request: service.ListParticipantSessionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.ListParticipantSessionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_participant_sessions + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_list_participant_sessions( + self, response: service.ListParticipantSessionsResponse + ) -> service.ListParticipantSessionsResponse: + """Post-rpc interceptor for list_participant_sessions + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_list_recordings( + self, + request: service.ListRecordingsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.ListRecordingsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_recordings + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. 
+ """ + return request, metadata + + def post_list_recordings( + self, response: service.ListRecordingsResponse + ) -> service.ListRecordingsResponse: + """Post-rpc interceptor for list_recordings + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_list_transcript_entries( + self, + request: service.ListTranscriptEntriesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.ListTranscriptEntriesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_transcript_entries + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_list_transcript_entries( + self, response: service.ListTranscriptEntriesResponse + ) -> service.ListTranscriptEntriesResponse: + """Post-rpc interceptor for list_transcript_entries + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + def pre_list_transcripts( + self, + request: service.ListTranscriptsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.ListTranscriptsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_transcripts + + Override in a subclass to manipulate the request or metadata + before they are sent to the ConferenceRecordsService server. + """ + return request, metadata + + def post_list_transcripts( + self, response: service.ListTranscriptsResponse + ) -> service.ListTranscriptsResponse: + """Post-rpc interceptor for list_transcripts + + Override in a subclass to manipulate the response + after it is returned by the ConferenceRecordsService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class ConferenceRecordsServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ConferenceRecordsServiceRestInterceptor + + +class ConferenceRecordsServiceRestTransport(ConferenceRecordsServiceTransport): + """REST backend transport for ConferenceRecordsService. + + REST API for services dealing with conference records. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ConferenceRecordsServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or ConferenceRecordsServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _GetConferenceRecord(ConferenceRecordsServiceRestStub):
+        def __hash__(self):
+            return hash("GetConferenceRecord")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: service.GetConferenceRecordRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> resource.ConferenceRecord:
+            r"""Call the get conference record method over HTTP.
+
+            Args:
+                request (~.service.GetConferenceRecordRequest):
+                    The request object. Request to get a conference record.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.resource.ConferenceRecord:
+                    `Developer
+                    Preview `__.
+                    Single instance of a meeting held in a space.
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{name=conferenceRecords/*}", + }, + ] + request, metadata = self._interceptor.pre_get_conference_record( + request, metadata + ) + pb_request = service.GetConferenceRecordRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.ConferenceRecord() + pb_resp = resource.ConferenceRecord.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_conference_record(resp) + return resp + + class _GetParticipant(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("GetParticipant") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.GetParticipantRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Participant: + r"""Call the get participant method over HTTP. + + Args: + request (~.service.GetParticipantRequest): + The request object. Request to get a Participant. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.Participant: + `Developer + Preview `__. + User who attended or is attending a conference. 
+
+            """
+
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "get",
+                    "uri": "/v2beta/{name=conferenceRecords/*/participants/*}",
+                },
+            ]
+            request, metadata = self._interceptor.pre_get_participant(request, metadata)
+            pb_request = service.GetParticipantRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+
+            # Jsonify the query params
+            query_params = json.loads(
+                json_format.MessageToJson(
+                    transcoded_request["query_params"],
+                    including_default_value_fields=False,
+                    use_integers_for_enums=True,
+                )
+            )
+            query_params.update(self._get_unset_required_fields(query_params))
+
+            query_params["$alt"] = "json;enum-encoding=int"
+
+            # Send the request
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(self._session, method)(
+                "{host}{uri}".format(host=self._host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = resource.Participant()
+            pb_resp = resource.Participant.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+            resp = self._interceptor.post_get_participant(resp)
+            return resp
+
+    class _GetParticipantSession(ConferenceRecordsServiceRestStub):
+        def __hash__(self):
+            return hash("GetParticipantSession")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: service.GetParticipantSessionRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> resource.ParticipantSession:
+            r"""Call the get participant session method over HTTP.
+
+            Args:
+                request (~.service.GetParticipantSessionRequest):
+                    The request object. Request to get a participant session.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.resource.ParticipantSession:
+                    `Developer
+                    Preview `__.
+                    Refers to each unique join/leave session when a user
+                    joins a conference from a device. Note that any time a
+                    user joins the conference a new unique ID is assigned.
+                    That means if a user joins a space multiple times from
+                    the same device, they're assigned different IDs, and are
+                    also treated as different participant sessions.
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{name=conferenceRecords/*/participants/*/participantSessions/*}", + }, + ] + request, metadata = self._interceptor.pre_get_participant_session( + request, metadata + ) + pb_request = service.GetParticipantSessionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.ParticipantSession() + pb_resp = resource.ParticipantSession.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_participant_session(resp) + return resp + + class _GetRecording(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("GetRecording") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.GetRecordingRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Recording: + r"""Call the get recording method over HTTP. + + Args: + request (~.service.GetRecordingRequest): + The request object. Request message for GetRecording + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.Recording: + `Developer + Preview `__. + Metadata about a recording created during a conference. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{name=conferenceRecords/*/recordings/*}", + }, + ] + request, metadata = self._interceptor.pre_get_recording(request, metadata) + pb_request = service.GetRecordingRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.Recording() + pb_resp = resource.Recording.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_recording(resp) + return resp + + class _GetTranscript(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("GetTranscript") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.GetTranscriptRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Transcript: + r"""Call the get transcript method over HTTP. + + Args: + request (~.service.GetTranscriptRequest): + The request object. Request for GetTranscript method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.Transcript: + `Developer + Preview `__. + Metadata for a transcript generated from a conference. + It refers to the ASR (Automatic Speech Recognition) + result of user's speech during the conference. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{name=conferenceRecords/*/transcripts/*}", + }, + ] + request, metadata = self._interceptor.pre_get_transcript(request, metadata) + pb_request = service.GetTranscriptRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.Transcript() + pb_resp = resource.Transcript.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_transcript(resp) + return resp + + class _GetTranscriptEntry(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("GetTranscriptEntry") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.GetTranscriptEntryRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.TranscriptEntry: + r"""Call the get transcript entry method over HTTP. + + Args: + request (~.service.GetTranscriptEntryRequest): + The request object. Request for GetTranscriptEntry + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.TranscriptEntry: + `Developer + Preview `__. + Single entry for one user’s speech during a transcript + session. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{name=conferenceRecords/*/transcripts/*/entries/*}", + }, + ] + request, metadata = self._interceptor.pre_get_transcript_entry( + request, metadata + ) + pb_request = service.GetTranscriptEntryRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.TranscriptEntry() + pb_resp = resource.TranscriptEntry.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_transcript_entry(resp) + return resp + + class _ListConferenceRecords(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("ListConferenceRecords") + + def __call__( + self, + request: service.ListConferenceRecordsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ListConferenceRecordsResponse: + r"""Call the list conference records method over HTTP. + + Args: + request (~.service.ListConferenceRecordsRequest): + The request object. Request to fetch list of conference + records per user. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListConferenceRecordsResponse: + Response of ListConferenceRecords + method. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/conferenceRecords", + }, + ] + request, metadata = self._interceptor.pre_list_conference_records( + request, metadata + ) + pb_request = service.ListConferenceRecordsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListConferenceRecordsResponse() + pb_resp = service.ListConferenceRecordsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_conference_records(resp) + return resp + + class _ListParticipants(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("ListParticipants") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.ListParticipantsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ListParticipantsResponse: + r"""Call the list participants method over HTTP. + + Args: + request (~.service.ListParticipantsRequest): + The request object. Request to fetch list of participant + per conference. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListParticipantsResponse: + Response of ListParticipants method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{parent=conferenceRecords/*}/participants", + }, + ] + request, metadata = self._interceptor.pre_list_participants( + request, metadata + ) + pb_request = service.ListParticipantsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListParticipantsResponse() + pb_resp = service.ListParticipantsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_participants(resp) + return resp + + class _ListParticipantSessions(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("ListParticipantSessions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.ListParticipantSessionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ListParticipantSessionsResponse: + r"""Call the list participant sessions method over HTTP. + + Args: + request (~.service.ListParticipantSessionsRequest): + The request object. Request to fetch list of participant + sessions per conference record per + participant. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListParticipantSessionsResponse: + Response of ListParticipants method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{parent=conferenceRecords/*/participants/*}/participantSessions", + }, + ] + request, metadata = self._interceptor.pre_list_participant_sessions( + request, metadata + ) + pb_request = service.ListParticipantSessionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListParticipantSessionsResponse() + pb_resp = service.ListParticipantSessionsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_participant_sessions(resp) + return resp + + class _ListRecordings(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("ListRecordings") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.ListRecordingsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ListRecordingsResponse: + r"""Call the list recordings method over HTTP. + + Args: + request (~.service.ListRecordingsRequest): + The request object. Request for ListRecordings method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListRecordingsResponse: + Response for ListRecordings method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{parent=conferenceRecords/*}/recordings", + }, + ] + request, metadata = self._interceptor.pre_list_recordings(request, metadata) + pb_request = service.ListRecordingsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListRecordingsResponse() + pb_resp = service.ListRecordingsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_recordings(resp) + return resp + + class _ListTranscriptEntries(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("ListTranscriptEntries") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.ListTranscriptEntriesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ListTranscriptEntriesResponse: + r"""Call the list transcript entries method over HTTP. + + Args: + request (~.service.ListTranscriptEntriesRequest): + The request object. Request for ListTranscriptEntries + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.service.ListTranscriptEntriesResponse: + Response for ListTranscriptEntries + method + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{parent=conferenceRecords/*/transcripts/*}/entries", + }, + ] + request, metadata = self._interceptor.pre_list_transcript_entries( + request, metadata + ) + pb_request = service.ListTranscriptEntriesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListTranscriptEntriesResponse() + pb_resp = service.ListTranscriptEntriesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_transcript_entries(resp) + return resp + + class _ListTranscripts(ConferenceRecordsServiceRestStub): + def __hash__(self): + return hash("ListTranscripts") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.ListTranscriptsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ListTranscriptsResponse: + r"""Call the list transcripts method over HTTP. + + Args: + request (~.service.ListTranscriptsRequest): + The request object. Request for ListTranscripts method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListTranscriptsResponse: + Response for ListTranscripts method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{parent=conferenceRecords/*}/transcripts", + }, + ] + request, metadata = self._interceptor.pre_list_transcripts( + request, metadata + ) + pb_request = service.ListTranscriptsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListTranscriptsResponse() + pb_resp = service.ListTranscriptsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_transcripts(resp) + return resp + + @property + def get_conference_record( + self, + ) -> Callable[[service.GetConferenceRecordRequest], resource.ConferenceRecord]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetConferenceRecord(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_participant( + self, + ) -> Callable[[service.GetParticipantRequest], resource.Participant]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetParticipant(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_participant_session( + self, + ) -> Callable[[service.GetParticipantSessionRequest], resource.ParticipantSession]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetParticipantSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_recording( + self, + ) -> Callable[[service.GetRecordingRequest], resource.Recording]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetRecording(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_transcript( + self, + ) -> Callable[[service.GetTranscriptRequest], resource.Transcript]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetTranscript(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_transcript_entry( + self, + ) -> Callable[[service.GetTranscriptEntryRequest], resource.TranscriptEntry]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetTranscriptEntry(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_conference_records( + self, + ) -> Callable[ + [service.ListConferenceRecordsRequest], service.ListConferenceRecordsResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListConferenceRecords(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_participants( + self, + ) -> Callable[[service.ListParticipantsRequest], service.ListParticipantsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListParticipants(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_participant_sessions( + self, + ) -> Callable[ + [service.ListParticipantSessionsRequest], + service.ListParticipantSessionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListParticipantSessions(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_recordings( + self, + ) -> Callable[[service.ListRecordingsRequest], service.ListRecordingsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListRecordings(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_transcript_entries( + self, + ) -> Callable[ + [service.ListTranscriptEntriesRequest], service.ListTranscriptEntriesResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTranscriptEntries(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_transcripts( + self, + ) -> Callable[[service.ListTranscriptsRequest], service.ListTranscriptsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTranscripts(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("ConferenceRecordsServiceRestTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/__init__.py new file mode 100644 index 000000000000..497fa1835771 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import SpacesServiceAsyncClient +from .client import SpacesServiceClient + +__all__ = ( + "SpacesServiceClient", + "SpacesServiceAsyncClient", +) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/async_client.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/async_client.py new file mode 100644 index 000000000000..81e8e52ac953 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/async_client.py @@ -0,0 +1,653 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.apps.meet_v2beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.protobuf import field_mask_pb2 # type: ignore + +from google.apps.meet_v2beta.types import resource, service + +from .client import SpacesServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, SpacesServiceTransport +from .transports.grpc_asyncio import SpacesServiceGrpcAsyncIOTransport + + +class SpacesServiceAsyncClient: + """REST API for services dealing with spaces.""" + + _client: SpacesServiceClient + + DEFAULT_ENDPOINT = SpacesServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpacesServiceClient.DEFAULT_MTLS_ENDPOINT + + conference_record_path = staticmethod(SpacesServiceClient.conference_record_path) + parse_conference_record_path = staticmethod( + SpacesServiceClient.parse_conference_record_path + ) + space_path = staticmethod(SpacesServiceClient.space_path) + parse_space_path = staticmethod(SpacesServiceClient.parse_space_path) + common_billing_account_path = staticmethod( + SpacesServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + SpacesServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(SpacesServiceClient.common_folder_path) + 
parse_common_folder_path = staticmethod( + SpacesServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + SpacesServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + SpacesServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(SpacesServiceClient.common_project_path) + parse_common_project_path = staticmethod( + SpacesServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(SpacesServiceClient.common_location_path) + parse_common_location_path = staticmethod( + SpacesServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpacesServiceAsyncClient: The constructed client. + """ + return SpacesServiceClient.from_service_account_info.__func__(SpacesServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpacesServiceAsyncClient: The constructed client. + """ + return SpacesServiceClient.from_service_account_file.__func__(SpacesServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return SpacesServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> SpacesServiceTransport: + """Returns the transport used by the client instance. 
+ + Returns: + SpacesServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(SpacesServiceClient).get_transport_class, type(SpacesServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SpacesServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the spaces service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpacesServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = SpacesServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_space( + self, + request: Optional[Union[service.CreateSpaceRequest, dict]] = None, + *, + space: Optional[resource.Space] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""`Developer + Preview `__. + Creates a space. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_create_space(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.CreateSpaceRequest( + ) + + # Make the request + response = await client.create_space(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.CreateSpaceRequest, dict]]): + The request object. 
Request to create a space. + space (:class:`google.apps.meet_v2beta.types.Space`): + Space to be created. As of May 2023, + the input space can be empty. Later on + the input space can be non-empty when + space configuration is introduced. + + This corresponds to the ``space`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Space: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Virtual place where conferences are held. Only one + active conference can be held in one space at any + given time. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([space]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.CreateSpaceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if space is not None: + request.space = space + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_space, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_space( + self, + request: Optional[Union[service.GetSpaceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""`Developer + Preview `__. + Gets a space by ``space_id`` or ``meeting_code``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_get_space(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetSpaceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_space(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.GetSpaceRequest, dict]]): + The request object. Request to get a space. + name (:class:`str`): + Required. Resource name of the space. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Space: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Virtual place where conferences are held. Only one + active conference can be held in one space at any + given time. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetSpaceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_space, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_space( + self, + request: Optional[Union[service.UpdateSpaceRequest, dict]] = None, + *, + space: Optional[resource.Space] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""`Developer + Preview `__. + Updates a space. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_update_space(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.UpdateSpaceRequest( + ) + + # Make the request + response = await client.update_space(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.UpdateSpaceRequest, dict]]): + The request object. Request to update a space. + space (:class:`google.apps.meet_v2beta.types.Space`): + Required. Space to be updated. + This corresponds to the ``space`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. Field mask used to specify the fields to be + updated in the space. 
If update_mask isn't provided, it + defaults to '*' and updates all fields provided in the + request, including deleting fields not set in the + request. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Space: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Virtual place where conferences are held. Only one + active conference can be held in one space at any + given time. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([space, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UpdateSpaceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if space is not None: + request.space = space + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_space, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("space.name", request.space.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def end_active_conference( + self, + request: Optional[Union[service.EndActiveConferenceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""`Developer + Preview `__. + Ends an active conference (if there is one). + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + async def sample_end_active_conference(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.EndActiveConferenceRequest( + name="name_value", + ) + + # Make the request + await client.end_active_conference(request=request) + + Args: + request (Optional[Union[google.apps.meet_v2beta.types.EndActiveConferenceRequest, dict]]): + The request object. Request to end an ongoing conference + of a space. + name (:class:`str`): + Required. Resource name of the space. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.EndActiveConferenceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.end_active_conference, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def __aenter__(self) -> "SpacesServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("SpacesServiceAsyncClient",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/client.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/client.py new file mode 100644 index 000000000000..b262f0eae19a --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/client.py @@ -0,0 +1,882 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
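Taken together, the async surface above supports use as an async context manager, which closes the underlying transport on exit. A minimal end-to-end sketch, assuming application-default credentials with access to the Meet API (the printed space name comes back from the service):

.. code-block:: python

    import asyncio

    from google.apps import meet_v2beta


    async def main():
        # Entering the client as an async context manager ensures the
        # transport is closed when the block exits.
        async with meet_v2beta.SpacesServiceAsyncClient() as client:
            space = await client.create_space(request=meet_v2beta.CreateSpaceRequest())
            print(space.name)

            # End the active conference in that space, if there is one.
            await client.end_active_conference(
                request=meet_v2beta.EndActiveConferenceRequest(name=space.name)
            )


    asyncio.run(main())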
+#
+from collections import OrderedDict
+import os
+import re
+from typing import (
+    Dict,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.apps.meet_v2beta import gapic_version as package_version
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.protobuf import field_mask_pb2  # type: ignore
+
+from google.apps.meet_v2beta.types import resource, service
+
+from .transports.base import DEFAULT_CLIENT_INFO, SpacesServiceTransport
+from .transports.grpc import SpacesServiceGrpcTransport
+from .transports.grpc_asyncio import SpacesServiceGrpcAsyncIOTransport
+from .transports.rest import SpacesServiceRestTransport
+
+
+class SpacesServiceClientMeta(type):
+    """Metaclass for the SpacesService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[SpacesServiceTransport]]
+    _transport_registry["grpc"] = SpacesServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = SpacesServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = SpacesServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[SpacesServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class SpacesServiceClient(metaclass=SpacesServiceClientMeta):
+    """REST API for services dealing with spaces."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "meet.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpacesServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpacesServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SpacesServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SpacesServiceTransport: The transport used by the client + instance. 
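Both client classes expose the same service-account constructors shown above; ``from_service_account_json`` is simply an alias of ``from_service_account_file``. A short illustrative sketch (the key file path is hypothetical):

.. code-block:: python

    from google.apps import meet_v2beta

    # Hypothetical key file; both constructors produce an equivalent client.
    client = meet_v2beta.SpacesServiceClient.from_service_account_file(
        "service-account-key.json"
    )
    same_client = meet_v2beta.SpacesServiceClient.from_service_account_json(
        "service-account-key.json"
    )

    # The chosen transport is reachable through the ``transport`` property.
    print(type(client.transport).__name__)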
+        """
+        return self._transport
+
+    @staticmethod
+    def conference_record_path(
+        conference_record: str,
+    ) -> str:
+        """Returns a fully-qualified conference_record string."""
+        return "conferenceRecords/{conference_record}".format(
+            conference_record=conference_record,
+        )
+
+    @staticmethod
+    def parse_conference_record_path(path: str) -> Dict[str, str]:
+        """Parses a conference_record path into its component segments."""
+        m = re.match(r"^conferenceRecords/(?P<conference_record>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def space_path(
+        space: str,
+    ) -> str:
+        """Returns a fully-qualified space string."""
+        return "spaces/{space}".format(
+            space=space,
+        )
+
+    @staticmethod
+    def parse_space_path(path: str) -> Dict[str, str]:
+        """Parses a space path into its component segments."""
+        m = re.match(r"^spaces/(?P<space>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, SpacesServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the spaces service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SpacesServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
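The resolution order implemented above can be exercised directly. A small sketch, assuming no client certificate is configured in the environment:

.. code-block:: python

    import os

    from google.api_core.client_options import ClientOptions
    from google.apps import meet_v2beta

    # With certificates disabled, the regular endpoint is chosen unless the
    # mTLS endpoint is forced via GOOGLE_API_USE_MTLS_ENDPOINT.
    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"
    endpoint, cert_source = meet_v2beta.SpacesServiceClient.get_mtls_endpoint_and_cert_source(
        ClientOptions()
    )
    print(endpoint, cert_source)  # meet.googleapis.com None

    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "always"
    endpoint, _ = meet_v2beta.SpacesServiceClient.get_mtls_endpoint_and_cert_source(
        ClientOptions()
    )
    print(endpoint)  # meet.mtls.googleapis.com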
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpacesServiceTransport): + # transport is a SpacesServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_space( + self, + request: Optional[Union[service.CreateSpaceRequest, dict]] = None, + *, + space: Optional[resource.Space] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""`Developer + Preview `__. + Creates a space. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_create_space(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.CreateSpaceRequest( + ) + + # Make the request + response = client.create_space(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.CreateSpaceRequest, dict]): + The request object. Request to create a space. + space (google.apps.meet_v2beta.types.Space): + Space to be created. As of May 2023, + the input space can be empty. Later on + the input space can be non-empty when + space configuration is introduced. + + This corresponds to the ``space`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Space: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Virtual place where conferences are held. Only one + active conference can be held in one space at any + given time. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([space]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateSpaceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateSpaceRequest): + request = service.CreateSpaceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if space is not None: + request.space = space + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_space] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_space( + self, + request: Optional[Union[service.GetSpaceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""`Developer + Preview `__. + Gets a space by ``space_id`` or ``meeting_code``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_get_space(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetSpaceRequest( + name="name_value", + ) + + # Make the request + response = client.get_space(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.GetSpaceRequest, dict]): + The request object. Request to get a space. + name (str): + Required. Resource name of the space. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Space: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Virtual place where conferences are held. Only one + active conference can be held in one space at any + given time. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetSpaceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetSpaceRequest): + request = service.GetSpaceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_space] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_space( + self, + request: Optional[Union[service.UpdateSpaceRequest, dict]] = None, + *, + space: Optional[resource.Space] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""`Developer + Preview `__. + Updates a space. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_update_space(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.UpdateSpaceRequest( + ) + + # Make the request + response = client.update_space(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.apps.meet_v2beta.types.UpdateSpaceRequest, dict]): + The request object. Request to update a space. + space (google.apps.meet_v2beta.types.Space): + Required. Space to be updated. + This corresponds to the ``space`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Field mask used to specify the fields to be + updated in the space. If update_mask isn't provided, it + defaults to '*' and updates all fields provided in the + request, including deleting fields not set in the + request. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.apps.meet_v2beta.types.Space: + [Developer Preview](\ https://developers.google.com/workspace/preview). + Virtual place where conferences are held. Only one + active conference can be held in one space at any + given time. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([space, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateSpaceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateSpaceRequest): + request = service.UpdateSpaceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if space is not None: + request.space = space + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_space] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("space.name", request.space.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def end_active_conference( + self, + request: Optional[Union[service.EndActiveConferenceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""`Developer + Preview `__. + Ends an active conference (if there is one). + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.apps import meet_v2beta + + def sample_end_active_conference(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.EndActiveConferenceRequest( + name="name_value", + ) + + # Make the request + client.end_active_conference(request=request) + + Args: + request (Union[google.apps.meet_v2beta.types.EndActiveConferenceRequest, dict]): + The request object. Request to end an ongoing conference + of a space. + name (str): + Required. Resource name of the space. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.EndActiveConferenceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.EndActiveConferenceRequest): + request = service.EndActiveConferenceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.end_active_conference] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self) -> "SpacesServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
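Every method above enforces the same rule: flattened arguments such as ``space`` and ``update_mask`` may be passed instead of a full request object, never alongside one. An illustrative sketch for ``update_space``; the space name and field path are hypothetical:

.. code-block:: python

    from google.protobuf import field_mask_pb2

    from google.apps import meet_v2beta

    client = meet_v2beta.SpacesServiceClient()

    # Hypothetical space name and field path.
    space = meet_v2beta.Space(name="spaces/example-space-id")
    mask = field_mask_pb2.FieldMask(paths=["config"])

    # Flattened arguments are accepted on their own...
    updated = client.update_space(space=space, update_mask=mask)

    # ...but combining them with a request object raises ValueError:
    # client.update_space(
    #     request=meet_v2beta.UpdateSpaceRequest(space=space),
    #     update_mask=mask,
    # )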
+ """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("SpacesServiceClient",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/__init__.py new file mode 100644 index 000000000000..fb7d7c132f24 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpacesServiceTransport +from .grpc import SpacesServiceGrpcTransport +from .grpc_asyncio import SpacesServiceGrpcAsyncIOTransport +from .rest import SpacesServiceRestInterceptor, SpacesServiceRestTransport + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SpacesServiceTransport]] +_transport_registry["grpc"] = SpacesServiceGrpcTransport +_transport_registry["grpc_asyncio"] = SpacesServiceGrpcAsyncIOTransport +_transport_registry["rest"] = SpacesServiceRestTransport + +__all__ = ( + "SpacesServiceTransport", + "SpacesServiceGrpcTransport", + "SpacesServiceGrpcAsyncIOTransport", + "SpacesServiceRestTransport", + "SpacesServiceRestInterceptor", +) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/base.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/base.py new file mode 100644 index 000000000000..76e05b757e11 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/base.py @@ -0,0 +1,204 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.apps.meet_v2beta import gapic_version as package_version +from google.apps.meet_v2beta.types import resource, service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class SpacesServiceTransport(abc.ABC): + """Abstract transport class for SpacesService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "meet.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
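+        # Self-signed JWTs let service account credentials authenticate directly
+        # against the service without a token-exchange round trip; they are only
+        # applied when ``always_use_jwt_access`` is requested and the credentials
+        # type supports it, as checked below.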
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_space: gapic_v1.method.wrap_method( + self.create_space, + default_timeout=60.0, + client_info=client_info, + ), + self.get_space: gapic_v1.method.wrap_method( + self.get_space, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_space: gapic_v1.method.wrap_method( + self.update_space, + default_timeout=60.0, + client_info=client_info, + ), + self.end_active_conference: gapic_v1.method.wrap_method( + self.end_active_conference, + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def create_space( + self, + ) -> Callable[ + [service.CreateSpaceRequest], Union[resource.Space, Awaitable[resource.Space]] + ]: + raise NotImplementedError() + + @property + def get_space( + self, + ) -> Callable[ + [service.GetSpaceRequest], Union[resource.Space, Awaitable[resource.Space]] + ]: + raise NotImplementedError() + + @property + def update_space( + self, + ) -> Callable[ + [service.UpdateSpaceRequest], Union[resource.Space, Awaitable[resource.Space]] + ]: + raise NotImplementedError() + + @property + def end_active_conference( + self, + ) -> Callable[ + [service.EndActiveConferenceRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("SpacesServiceTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc.py new file mode 100644 index 000000000000..31b533a4c005 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc.py @@ -0,0 +1,346 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore + +from google.apps.meet_v2beta.types import resource, service + +from .base import DEFAULT_CLIENT_INFO, SpacesServiceTransport + + +class SpacesServiceGrpcTransport(SpacesServiceTransport): + """gRPC backend transport for SpacesService. + + REST API for services dealing with spaces. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def create_space(self) -> Callable[[service.CreateSpaceRequest], resource.Space]: + r"""Return a callable for the create space method over gRPC. + + `Developer + Preview `__. + Creates a space. + + Returns: + Callable[[~.CreateSpaceRequest], + ~.Space]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_space" not in self._stubs: + self._stubs["create_space"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/CreateSpace", + request_serializer=service.CreateSpaceRequest.serialize, + response_deserializer=resource.Space.deserialize, + ) + return self._stubs["create_space"] + + @property + def get_space(self) -> Callable[[service.GetSpaceRequest], resource.Space]: + r"""Return a callable for the get space method over gRPC. + + `Developer + Preview `__. + Gets a space by ``space_id`` or ``meeting_code``. + + Returns: + Callable[[~.GetSpaceRequest], + ~.Space]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_space" not in self._stubs: + self._stubs["get_space"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/GetSpace", + request_serializer=service.GetSpaceRequest.serialize, + response_deserializer=resource.Space.deserialize, + ) + return self._stubs["get_space"] + + @property + def update_space(self) -> Callable[[service.UpdateSpaceRequest], resource.Space]: + r"""Return a callable for the update space method over gRPC. + + `Developer + Preview `__. + Updates a space. + + Returns: + Callable[[~.UpdateSpaceRequest], + ~.Space]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
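+        # The stub is created lazily on first access and cached in ``self._stubs``,
+        # so later property lookups reuse the same callable.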
+ if "update_space" not in self._stubs: + self._stubs["update_space"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/UpdateSpace", + request_serializer=service.UpdateSpaceRequest.serialize, + response_deserializer=resource.Space.deserialize, + ) + return self._stubs["update_space"] + + @property + def end_active_conference( + self, + ) -> Callable[[service.EndActiveConferenceRequest], empty_pb2.Empty]: + r"""Return a callable for the end active conference method over gRPC. + + `Developer + Preview `__. + Ends an active conference (if there is one). + + Returns: + Callable[[~.EndActiveConferenceRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "end_active_conference" not in self._stubs: + self._stubs["end_active_conference"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/EndActiveConference", + request_serializer=service.EndActiveConferenceRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["end_active_conference"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("SpacesServiceGrpcTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc_asyncio.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..a38f135f8012 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/grpc_asyncio.py @@ -0,0 +1,351 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.apps.meet_v2beta.types import resource, service + +from .base import DEFAULT_CLIENT_INFO, SpacesServiceTransport +from .grpc import SpacesServiceGrpcTransport + + +class SpacesServiceGrpcAsyncIOTransport(SpacesServiceTransport): + """gRPC AsyncIO backend transport for SpacesService. + + REST API for services dealing with spaces. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
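+
+    A minimal sketch of selecting this transport explicitly (illustrative only;
+    the ``"grpc_asyncio"`` key comes from the transport registry in
+    ``transports/__init__.py``, and the async client already uses it by default):
+
+    .. code-block:: python
+
+        from google.apps import meet_v2beta
+
+        # Passing the key is shown here only to make the selection explicit.
+        client = meet_v2beta.SpacesServiceAsyncClient(transport="grpc_asyncio")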
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_space( + self, + ) -> Callable[[service.CreateSpaceRequest], Awaitable[resource.Space]]: + r"""Return a callable for the create space method over gRPC. + + `Developer + Preview `__. + Creates a space. + + Returns: + Callable[[~.CreateSpaceRequest], + Awaitable[~.Space]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_space" not in self._stubs: + self._stubs["create_space"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/CreateSpace", + request_serializer=service.CreateSpaceRequest.serialize, + response_deserializer=resource.Space.deserialize, + ) + return self._stubs["create_space"] + + @property + def get_space( + self, + ) -> Callable[[service.GetSpaceRequest], Awaitable[resource.Space]]: + r"""Return a callable for the get space method over gRPC. + + `Developer + Preview `__. + Gets a space by ``space_id`` or ``meeting_code``. + + Returns: + Callable[[~.GetSpaceRequest], + Awaitable[~.Space]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_space" not in self._stubs: + self._stubs["get_space"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/GetSpace", + request_serializer=service.GetSpaceRequest.serialize, + response_deserializer=resource.Space.deserialize, + ) + return self._stubs["get_space"] + + @property + def update_space( + self, + ) -> Callable[[service.UpdateSpaceRequest], Awaitable[resource.Space]]: + r"""Return a callable for the update space method over gRPC. + + `Developer + Preview `__. + Updates a space. + + Returns: + Callable[[~.UpdateSpaceRequest], + Awaitable[~.Space]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_space" not in self._stubs: + self._stubs["update_space"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/UpdateSpace", + request_serializer=service.UpdateSpaceRequest.serialize, + response_deserializer=resource.Space.deserialize, + ) + return self._stubs["update_space"] + + @property + def end_active_conference( + self, + ) -> Callable[[service.EndActiveConferenceRequest], Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the end active conference method over gRPC. + + `Developer + Preview `__. + Ends an active conference (if there is one). + + Returns: + Callable[[~.EndActiveConferenceRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
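+        # Because this is the asyncio transport, the returned callable yields an
+        # awaitable that resolves to ``empty_pb2.Empty``.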
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "end_active_conference" not in self._stubs: + self._stubs["end_active_conference"] = self.grpc_channel.unary_unary( + "/google.apps.meet.v2beta.SpacesService/EndActiveConference", + request_serializer=service.EndActiveConferenceRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["end_active_conference"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("SpacesServiceGrpcAsyncIOTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/rest.py b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/rest.py new file mode 100644 index 000000000000..251086436b8e --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/services/spaces_service/transports/rest.py @@ -0,0 +1,668 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.protobuf import empty_pb2 # type: ignore + +from google.apps.meet_v2beta.types import resource, service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import SpacesServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class SpacesServiceRestInterceptor: + """Interceptor for SpacesService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the SpacesServiceRestTransport. + + .. 
code-block:: python + class MyCustomSpacesServiceInterceptor(SpacesServiceRestInterceptor): + def pre_create_space(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_space(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_end_active_conference(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_space(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_space(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_space(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_space(self, response): + logging.log(f"Received response: {response}") + return response + + transport = SpacesServiceRestTransport(interceptor=MyCustomSpacesServiceInterceptor()) + client = SpacesServiceClient(transport=transport) + + + """ + + def pre_create_space( + self, request: service.CreateSpaceRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[service.CreateSpaceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_space + + Override in a subclass to manipulate the request or metadata + before they are sent to the SpacesService server. + """ + return request, metadata + + def post_create_space(self, response: resource.Space) -> resource.Space: + """Post-rpc interceptor for create_space + + Override in a subclass to manipulate the response + after it is returned by the SpacesService server but before + it is returned to user code. + """ + return response + + def pre_end_active_conference( + self, + request: service.EndActiveConferenceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[service.EndActiveConferenceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for end_active_conference + + Override in a subclass to manipulate the request or metadata + before they are sent to the SpacesService server. + """ + return request, metadata + + def pre_get_space( + self, request: service.GetSpaceRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[service.GetSpaceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_space + + Override in a subclass to manipulate the request or metadata + before they are sent to the SpacesService server. + """ + return request, metadata + + def post_get_space(self, response: resource.Space) -> resource.Space: + """Post-rpc interceptor for get_space + + Override in a subclass to manipulate the response + after it is returned by the SpacesService server but before + it is returned to user code. + """ + return response + + def pre_update_space( + self, request: service.UpdateSpaceRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[service.UpdateSpaceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_space + + Override in a subclass to manipulate the request or metadata + before they are sent to the SpacesService server. + """ + return request, metadata + + def post_update_space(self, response: resource.Space) -> resource.Space: + """Post-rpc interceptor for update_space + + Override in a subclass to manipulate the response + after it is returned by the SpacesService server but before + it is returned to user code. 
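+
+        A minimal illustrative subclass (assumes this module's
+        ``SpacesServiceRestInterceptor`` is in scope), in the spirit of the
+        class-level example above:
+
+        .. code-block:: python
+
+            import logging
+
+            class LoggingSpacesServiceInterceptor(SpacesServiceRestInterceptor):
+                def post_update_space(self, response):
+                    logging.info("Updated space: %s", response.name)
+                    return response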
+ """ + return response + + +@dataclasses.dataclass +class SpacesServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: SpacesServiceRestInterceptor + + +class SpacesServiceRestTransport(SpacesServiceTransport): + """REST backend transport for SpacesService. + + REST API for services dealing with spaces. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "meet.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[SpacesServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
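+        # Illustrative note: this transport is usually selected through the client
+        # factory rather than constructed directly, e.g.
+        #
+        #     client = SpacesServiceClient(transport="rest")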
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or SpacesServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _CreateSpace(SpacesServiceRestStub): + def __hash__(self): + return hash("CreateSpace") + + def __call__( + self, + request: service.CreateSpaceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""Call the create space method over HTTP. + + Args: + request (~.service.CreateSpaceRequest): + The request object. Request to create a space. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.Space: + `Developer + Preview `__. + Virtual place where conferences are held. Only one + active conference can be held in one space at any given + time. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2beta/spaces", + "body": "space", + }, + ] + request, metadata = self._interceptor.pre_create_space(request, metadata) + pb_request = service.CreateSpaceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
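+            # For example, a 404 response surfaces as ``core_exceptions.NotFound``
+            # and a 403 as ``core_exceptions.PermissionDenied``.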
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.Space() + pb_resp = resource.Space.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_space(resp) + return resp + + class _EndActiveConference(SpacesServiceRestStub): + def __hash__(self): + return hash("EndActiveConference") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.EndActiveConferenceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the end active conference method over HTTP. + + Args: + request (~.service.EndActiveConferenceRequest): + The request object. Request to end an ongoing conference + of a space. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2beta/{name=spaces/*}:endActiveConference", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_end_active_conference( + request, metadata + ) + pb_request = service.EndActiveConferenceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetSpace(SpacesServiceRestStub): + def __hash__(self): + return hash("GetSpace") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.GetSpaceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""Call the get space method over HTTP. + + Args: + request (~.service.GetSpaceRequest): + The request object. Request to get a space. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.Space: + `Developer + Preview `__. + Virtual place where conferences are held. Only one + active conference can be held in one space at any given + time. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2beta/{name=spaces/*}", + }, + ] + request, metadata = self._interceptor.pre_get_space(request, metadata) + pb_request = service.GetSpaceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.Space() + pb_resp = resource.Space.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_space(resp) + return resp + + class _UpdateSpace(SpacesServiceRestStub): + def __hash__(self): + return hash("UpdateSpace") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: service.UpdateSpaceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resource.Space: + r"""Call the update space method over HTTP. + + Args: + request (~.service.UpdateSpaceRequest): + The request object. Request to update a space. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resource.Space: + `Developer + Preview `__. + Virtual place where conferences are held. Only one + active conference can be held in one space at any given + time. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2beta/{space.name=spaces/*}", + "body": "space", + }, + ] + request, metadata = self._interceptor.pre_update_space(request, metadata) + pb_request = service.UpdateSpaceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resource.Space() + pb_resp = resource.Space.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_space(resp) + return resp + + @property + def create_space(self) -> Callable[[service.CreateSpaceRequest], resource.Space]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateSpace(self._session, self._host, self._interceptor) # type: ignore + + @property + def end_active_conference( + self, + ) -> Callable[[service.EndActiveConferenceRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._EndActiveConference(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_space(self) -> Callable[[service.GetSpaceRequest], resource.Space]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetSpace(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_space(self) -> Callable[[service.UpdateSpaceRequest], resource.Space]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateSpace(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("SpacesServiceRestTransport",) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/types/__init__.py b/packages/google-apps-meet/google/apps/meet_v2beta/types/__init__.py new file mode 100644 index 000000000000..2c25509ad9bb --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/types/__init__.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .resource import ( + ActiveConference, + AnonymousUser, + ConferenceRecord, + DocsDestination, + DriveDestination, + Participant, + ParticipantSession, + PhoneUser, + Recording, + SignedinUser, + Space, + SpaceConfig, + Transcript, + TranscriptEntry, +) +from .service import ( + CreateSpaceRequest, + EndActiveConferenceRequest, + GetConferenceRecordRequest, + GetParticipantRequest, + GetParticipantSessionRequest, + GetRecordingRequest, + GetSpaceRequest, + GetTranscriptEntryRequest, + GetTranscriptRequest, + ListConferenceRecordsRequest, + ListConferenceRecordsResponse, + ListParticipantSessionsRequest, + ListParticipantSessionsResponse, + ListParticipantsRequest, + ListParticipantsResponse, + ListRecordingsRequest, + ListRecordingsResponse, + ListTranscriptEntriesRequest, + ListTranscriptEntriesResponse, + ListTranscriptsRequest, + ListTranscriptsResponse, + UpdateSpaceRequest, +) + +__all__ = ( + "ActiveConference", + "AnonymousUser", + "ConferenceRecord", + "DocsDestination", + "DriveDestination", + "Participant", + "ParticipantSession", + "PhoneUser", + "Recording", + "SignedinUser", + "Space", + "SpaceConfig", + "Transcript", + "TranscriptEntry", + "CreateSpaceRequest", + "EndActiveConferenceRequest", + "GetConferenceRecordRequest", + "GetParticipantRequest", + "GetParticipantSessionRequest", + "GetRecordingRequest", + "GetSpaceRequest", + "GetTranscriptEntryRequest", + "GetTranscriptRequest", + "ListConferenceRecordsRequest", + "ListConferenceRecordsResponse", + "ListParticipantSessionsRequest", + "ListParticipantSessionsResponse", + "ListParticipantsRequest", + "ListParticipantsResponse", + "ListRecordingsRequest", + "ListRecordingsResponse", + "ListTranscriptEntriesRequest", + "ListTranscriptEntriesResponse", + "ListTranscriptsRequest", + "ListTranscriptsResponse", + "UpdateSpaceRequest", +) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/types/resource.py b/packages/google-apps-meet/google/apps/meet_v2beta/types/resource.py new file mode 100644 index 000000000000..36cd8f084163 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/types/resource.py @@ -0,0 +1,637 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.apps.meet.v2beta", + manifest={ + "Space", + "ActiveConference", + "SpaceConfig", + "ConferenceRecord", + "Participant", + "ParticipantSession", + "SignedinUser", + "AnonymousUser", + "PhoneUser", + "Recording", + "DriveDestination", + "Transcript", + "DocsDestination", + "TranscriptEntry", + }, +) + + +class Space(proto.Message): + r"""`Developer + Preview `__. + Virtual place where conferences are held. Only one active conference + can be held in one space at any given time. + + Attributes: + name (str): + Immutable. Resource name of the space. Format: + ``spaces/{space}`` + meeting_uri (str): + Output only. URI used to join meeting, such as + ``https://meet.google.com/abc-mnop-xyz``. + meeting_code (str): + Output only. Type friendly code to join the meeting. Format: + ``[a-z]+-[a-z]+-[a-z]+`` such as ``abc-mnop-xyz``. The + maximum length is 128 characters. Can ONLY be used as alias + of the space ID to get the space. + config (google.apps.meet_v2beta.types.SpaceConfig): + Configuration pertaining to the meeting + space. + active_conference (google.apps.meet_v2beta.types.ActiveConference): + Active conference if it exists. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + meeting_uri: str = proto.Field( + proto.STRING, + number=2, + ) + meeting_code: str = proto.Field( + proto.STRING, + number=3, + ) + config: "SpaceConfig" = proto.Field( + proto.MESSAGE, + number=5, + message="SpaceConfig", + ) + active_conference: "ActiveConference" = proto.Field( + proto.MESSAGE, + number=6, + message="ActiveConference", + ) + + +class ActiveConference(proto.Message): + r"""Active conference. + + Attributes: + conference_record (str): + Output only. Reference to 'ConferenceRecord' resource. + Format: ``conferenceRecords/{conference_record}`` where + ``{conference_record}`` is a unique id for each instance of + a call within a space. + """ + + conference_record: str = proto.Field( + proto.STRING, + number=1, + ) + + +class SpaceConfig(proto.Message): + r"""The configuration pertaining to a meeting space. + + Attributes: + access_type (google.apps.meet_v2beta.types.SpaceConfig.AccessType): + Access type of the meeting space that + determines who can join without knocking. + Default: The user's default access settings. + Controlled by the user's admin for enterprise + users or RESTRICTED. + entry_point_access (google.apps.meet_v2beta.types.SpaceConfig.EntryPointAccess): + Defines the entry points that can be used to + join meetings hosted in this meeting space. + Default: EntryPointAccess.ALL + """ + + class AccessType(proto.Enum): + r"""Possible access types for a meeting space. + + Values: + ACCESS_TYPE_UNSPECIFIED (0): + Default value specified by the user's + organization. Note: This is never returned, as + the configured access type is returned instead. 
+ OPEN (1): + Anyone with the join information (for + example, the URL or phone access information) + can join without knocking. + TRUSTED (2): + Members of the host's organization, invited + external users, and dial-in users can join + without knocking. Everyone else must knock. + RESTRICTED (3): + Only invitees can join without knocking. + Everyone else must knock. + """ + ACCESS_TYPE_UNSPECIFIED = 0 + OPEN = 1 + TRUSTED = 2 + RESTRICTED = 3 + + class EntryPointAccess(proto.Enum): + r"""Entry points that can be used to join a meeting. Example: + ``meet.google.com``, the Embed SDK Web, or a mobile application. + + Values: + ENTRY_POINT_ACCESS_UNSPECIFIED (0): + Unused. + ALL (1): + All entry points are allowed. + CREATOR_APP_ONLY (2): + Only entry points owned by the Google Cloud + project that created the space can be used to + join meetings in this space. Apps can use the + Embed SDK Web or mobile Meet SDKs to create + owned entry points. + """ + ENTRY_POINT_ACCESS_UNSPECIFIED = 0 + ALL = 1 + CREATOR_APP_ONLY = 2 + + access_type: AccessType = proto.Field( + proto.ENUM, + number=1, + enum=AccessType, + ) + entry_point_access: EntryPointAccess = proto.Field( + proto.ENUM, + number=2, + enum=EntryPointAccess, + ) + + +class ConferenceRecord(proto.Message): + r"""`Developer + Preview `__. Single + instance of a meeting held in a space. + + Attributes: + name (str): + Identifier. Resource name of the conference record. Format: + ``conferenceRecords/{conference_record}`` where + ``{conference_record}`` is a unique id for each instance of + a call within a space. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the conference + started, always set. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the conference + ended. Set for past conferences. Unset if the + conference is ongoing. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Server enforced expire time for + when this conference record resource is deleted. + The resource is deleted 30 days after the + conference ends. + space (str): + Output only. The space where the conference + was held. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + space: str = proto.Field( + proto.STRING, + number=5, + ) + + +class Participant(proto.Message): + r"""`Developer + Preview `__. User + who attended or is attending a conference. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + signedin_user (google.apps.meet_v2beta.types.SignedinUser): + Signed-in user. + + This field is a member of `oneof`_ ``user``. + anonymous_user (google.apps.meet_v2beta.types.AnonymousUser): + Anonymous user. + + This field is a member of `oneof`_ ``user``. + phone_user (google.apps.meet_v2beta.types.PhoneUser): + User who calls in from their phone. + + This field is a member of `oneof`_ ``user``. 
+ name (str): + Output only. Resource name of the participant. Format: + ``conferenceRecords/{conference_record}/participants/{participant}`` + earliest_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the participant joined + the meeting for the first time. + latest_end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the participant left + the meeting for the last time. This can be null + if it is an active meeting. + """ + + signedin_user: "SignedinUser" = proto.Field( + proto.MESSAGE, + number=4, + oneof="user", + message="SignedinUser", + ) + anonymous_user: "AnonymousUser" = proto.Field( + proto.MESSAGE, + number=5, + oneof="user", + message="AnonymousUser", + ) + phone_user: "PhoneUser" = proto.Field( + proto.MESSAGE, + number=6, + oneof="user", + message="PhoneUser", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + earliest_start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + latest_end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + + +class ParticipantSession(proto.Message): + r"""`Developer + Preview `__. Refers + to each unique join/leave session when a user joins a conference + from a device. Note that any time a user joins the conference a new + unique ID is assigned. That means if a user joins a space multiple + times from the same device, they're assigned different IDs, and are + also be treated as different participant sessions. + + Attributes: + name (str): + Identifier. Session id. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the user session + started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the user session + ended. Unset if the user session hasn’t ended. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class SignedinUser(proto.Message): + r"""A signed-in user can be: + + a) An individual joining from a personal computer, mobile + device, or through companion mode. + b) A robot account used by conference room devices. + + Attributes: + user (str): + Output only. Unique ID for the user. Interoperable with + Admin SDK API and People API. Format: ``users/{user}`` + display_name (str): + Output only. For a personal device, it's the + user's first and last name. For a robot account, + it's the admin specified device name. For + example, "Altostrat Room". + """ + + user: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + + +class AnonymousUser(proto.Message): + r"""User who joins anonymously (meaning not signed into a Google + Account). + + Attributes: + display_name (str): + Output only. User provided name when they + join a conference anonymously. + """ + + display_name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class PhoneUser(proto.Message): + r"""User dialing in from a phone where the user's identity is + unknown because they haven't signed in with a Google Account. + + Attributes: + display_name (str): + Output only. Partially redacted user's phone + number when they call in. 
+ """ + + display_name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class Recording(proto.Message): + r"""`Developer + Preview `__. + Metadata about a recording created during a conference. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + drive_destination (google.apps.meet_v2beta.types.DriveDestination): + Output only. Recording is saved to Google Drive as an mp4 + file. The ``drive_destination`` includes the Drive + ``fileId`` that can be used to download the file using the + ``files.get`` method of the Drive API. + + This field is a member of `oneof`_ ``destination``. + name (str): + Output only. Resource name of the recording. Format: + ``conferenceRecords/{conference_record}/recordings/{recording}`` + where ``{recording}`` is a 1:1 mapping to each unique + recording session during the conference. + state (google.apps.meet_v2beta.types.Recording.State): + Output only. Current state. + """ + + class State(proto.Enum): + r"""Current state of the recording session. + + Values: + STATE_UNSPECIFIED (0): + Default, never used. + STARTED (1): + An active recording session has started. + ENDED (2): + This recording session has ended, but the + recording file hasn't been generated yet. + FILE_GENERATED (3): + Recording file is generated and ready to + download. + """ + STATE_UNSPECIFIED = 0 + STARTED = 1 + ENDED = 2 + FILE_GENERATED = 3 + + drive_destination: "DriveDestination" = proto.Field( + proto.MESSAGE, + number=6, + oneof="destination", + message="DriveDestination", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + state: State = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + + +class DriveDestination(proto.Message): + r"""Export location where a recording file is saved in Google + Drive. + + Attributes: + file (str): + Output only. The ``fileId`` for the underlying MP4 file. For + example, "1kuceFZohVoCh6FulBHxwy6I15Ogpc4hP". Use + ``$ GET https://www.googleapis.com/drive/v3/files/{$fileId}?alt=media`` + to download the blob. For more information, see + https://developers.google.com/drive/api/v3/reference/files/get. + export_uri (str): + Output only. Link used to play back the recording file in + the browser. For example, + ``https://drive.google.com/file/d/{$fileId}/view``. + """ + + file: str = proto.Field( + proto.STRING, + number=1, + ) + export_uri: str = proto.Field( + proto.STRING, + number=2, + ) + + +class Transcript(proto.Message): + r"""`Developer + Preview `__. + Metadata for a transcript generated from a conference. It refers to + the ASR (Automatic Speech Recognition) result of user's speech + during the conference. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + docs_destination (google.apps.meet_v2beta.types.DocsDestination): + Output only. Where the Google Docs transcript + is saved. + + This field is a member of `oneof`_ ``destination``. + name (str): + Output only. Resource name of the transcript. Format: + ``conferenceRecords/{conference_record}/transcripts/{transcript}``, + where ``{transcript}`` is a 1:1 mapping to each unique + transcription session of the conference. + state (google.apps.meet_v2beta.types.Transcript.State): + Output only. Current state. + """ + + class State(proto.Enum): + r"""Current state of the transcript session. + + Values: + STATE_UNSPECIFIED (0): + Default, never used. + STARTED (1): + An active transcript session has started. 
+ ENDED (2): + This transcript session has ended, but the + transcript file hasn't been generated yet. + FILE_GENERATED (3): + Transcript file is generated and ready to + download. + """ + STATE_UNSPECIFIED = 0 + STARTED = 1 + ENDED = 2 + FILE_GENERATED = 3 + + docs_destination: "DocsDestination" = proto.Field( + proto.MESSAGE, + number=6, + oneof="destination", + message="DocsDestination", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + state: State = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + + +class DocsDestination(proto.Message): + r"""Google Docs location where the transcript file is saved. + + Attributes: + document (str): + Output only. The document ID for the underlying Google Docs + transcript file. For example, + "1kuceFZohVoCh6FulBHxwy6I15Ogpc4hP". Use the + ``documents.get`` method of the Google Docs API + (https://developers.google.com/docs/api/reference/rest/v1/documents/get) + to fetch the content. + export_uri (str): + Output only. URI for the Google Docs transcript file. Use + ``https://docs.google.com/document/d/{$DocumentId}/view`` to + browse the transcript in the browser. + """ + + document: str = proto.Field( + proto.STRING, + number=1, + ) + export_uri: str = proto.Field( + proto.STRING, + number=2, + ) + + +class TranscriptEntry(proto.Message): + r"""`Developer + Preview `__. Single + entry for one user’s speech during a transcript session. + + Attributes: + name (str): + Output only. Resource name of the entry. Format: + "conferenceRecords/{conference_record}/transcripts/{transcript}/entries/{entry}". + participant (str): + Output only. Refer to the participant who + speaks. + text (str): + Output only. The transcribed text of the + participant's voice, at maximum 10K words. Note + that the limit is subject to change. + language_code (str): + Output only. Language of spoken text, such as + "en-US". IETF BCP 47 syntax + (https://tools.ietf.org/html/bcp47) + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the transcript + entry started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the transcript + entry ended. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + participant: str = proto.Field( + proto.STRING, + number=2, + ) + text: str = proto.Field( + proto.STRING, + number=3, + ) + language_code: str = proto.Field( + proto.STRING, + number=4, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/types/service.py b/packages/google-apps-meet/google/apps/meet_v2beta/types/service.py new file mode 100644 index 000000000000..597f0e5dcfe6 --- /dev/null +++ b/packages/google-apps-meet/google/apps/meet_v2beta/types/service.py @@ -0,0 +1,615 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
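
# Hand-written illustrative sketch (not generated code from this patch): how the
# Space resource defined in types/resource.py above might be created through the
# package's SpacesService surface. The client class and method names
# (meet_v2beta.SpacesServiceClient, create_space) are assumed from the usual
# GAPIC naming conventions and should be checked against the published library.
from google.apps import meet_v2beta


def sample_create_space() -> None:
    # Create a client for the SpacesService added by this package.
    client = meet_v2beta.SpacesServiceClient()

    # Per the CreateSpaceRequest docstring defined later in this file, the
    # input Space may currently be empty.
    space = client.create_space(request=meet_v2beta.CreateSpaceRequest())

    # meeting_uri and meeting_code are documented on the Space message above.
    print(space.meeting_uri, space.meeting_code)
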
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import field_mask_pb2 # type: ignore +import proto # type: ignore + +from google.apps.meet_v2beta.types import resource + +__protobuf__ = proto.module( + package="google.apps.meet.v2beta", + manifest={ + "CreateSpaceRequest", + "GetSpaceRequest", + "UpdateSpaceRequest", + "EndActiveConferenceRequest", + "GetConferenceRecordRequest", + "ListConferenceRecordsRequest", + "ListConferenceRecordsResponse", + "GetParticipantRequest", + "ListParticipantsRequest", + "ListParticipantsResponse", + "GetParticipantSessionRequest", + "ListParticipantSessionsRequest", + "ListParticipantSessionsResponse", + "GetRecordingRequest", + "ListRecordingsRequest", + "ListRecordingsResponse", + "GetTranscriptRequest", + "ListTranscriptsRequest", + "ListTranscriptsResponse", + "GetTranscriptEntryRequest", + "ListTranscriptEntriesRequest", + "ListTranscriptEntriesResponse", + }, +) + + +class CreateSpaceRequest(proto.Message): + r"""Request to create a space. + + Attributes: + space (google.apps.meet_v2beta.types.Space): + Space to be created. As of May 2023, the + input space can be empty. Later on the input + space can be non-empty when space configuration + is introduced. + """ + + space: resource.Space = proto.Field( + proto.MESSAGE, + number=1, + message=resource.Space, + ) + + +class GetSpaceRequest(proto.Message): + r"""Request to get a space. + + Attributes: + name (str): + Required. Resource name of the space. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateSpaceRequest(proto.Message): + r"""Request to update a space. + + Attributes: + space (google.apps.meet_v2beta.types.Space): + Required. Space to be updated. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Field mask used to specify the fields to be + updated in the space. If update_mask isn't provided, it + defaults to '*' and updates all fields provided in the + request, including deleting fields not set in the request. + """ + + space: resource.Space = proto.Field( + proto.MESSAGE, + number=1, + message=resource.Space, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class EndActiveConferenceRequest(proto.Message): + r"""Request to end an ongoing conference of a space. + + Attributes: + name (str): + Required. Resource name of the space. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class GetConferenceRecordRequest(proto.Message): + r"""Request to get a conference record. + + Attributes: + name (str): + Required. Resource name of the conference. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListConferenceRecordsRequest(proto.Message): + r"""Request to fetch list of conference records per user. + + Attributes: + page_size (int): + Optional. Maximum number of conference + records to return. The service might return + fewer than this value. If unspecified, at most + 25 conference records are returned. The maximum + value is 100; values above 100 are coerced to + 100. Maximum might change in the future. + page_token (str): + Optional. Page token returned from previous + List Call. + filter (str): + Optional. User specified filtering condition in EBNF format. 
+ The following are the filterable fields: + + - ``space.meeting_code`` + - ``space.name`` + - ``start_time`` + - ``end_time`` + + For example, ``space.meeting_code = "abc-mnop-xyz"``. + """ + + page_size: int = proto.Field( + proto.INT32, + number=1, + ) + page_token: str = proto.Field( + proto.STRING, + number=2, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListConferenceRecordsResponse(proto.Message): + r"""Response of ListConferenceRecords method. + + Attributes: + conference_records (MutableSequence[google.apps.meet_v2beta.types.ConferenceRecord]): + List of conferences in one page. + next_page_token (str): + Token to be circulated back for further List + call if current List does NOT include all the + Conferences. Unset if all conferences have been + returned. + """ + + @property + def raw_page(self): + return self + + conference_records: MutableSequence[ + resource.ConferenceRecord + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=resource.ConferenceRecord, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetParticipantRequest(proto.Message): + r"""Request to get a Participant. + + Attributes: + name (str): + Required. Resource name of the participant. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListParticipantsRequest(proto.Message): + r"""Request to fetch list of participant per conference. + + Attributes: + parent (str): + Required. Format: ``conferenceRecords/{conference_record}`` + page_size (int): + Maximum number of participants to return. The + service might return fewer than this value. + If unspecified, at most 100 participants are + returned. The maximum value is 250; values above + 250 are coerced to 250. Maximum might change in + the future. + page_token (str): + Page token returned from previous List Call. + filter (str): + Optional. User specified filtering condition in EBNF format. + The following are the filterable fields: + + - ``earliest_start_time`` + - ``latest_end_time`` + + For example, ``latest_end_time IS NULL`` returns active + participants in the conference. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListParticipantsResponse(proto.Message): + r"""Response of ListParticipants method. + + Attributes: + participants (MutableSequence[google.apps.meet_v2beta.types.Participant]): + List of participants in one page. + next_page_token (str): + Token to be circulated back for further List + call if current List doesn't include all the + participants. Unset if all participants are + returned. + total_size (int): + Total, exact number of ``participants``. By default, this + field isn't included in the response. Set the field mask in + `SystemParameterContext `__ + to receive this field in the response. + """ + + @property + def raw_page(self): + return self + + participants: MutableSequence[resource.Participant] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=resource.Participant, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + total_size: int = proto.Field( + proto.INT32, + number=3, + ) + + +class GetParticipantSessionRequest(proto.Message): + r"""Request to get a participant session. + + Attributes: + name (str): + Required. Resource name of the participant. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListParticipantSessionsRequest(proto.Message): + r"""Request to fetch list of participant sessions per conference + record per participant. + + Attributes: + parent (str): + Required. Format: + ``conferenceRecords/{conference_record}/participants/{participant}`` + page_size (int): + Optional. Maximum number of participant + sessions to return. The service might return + fewer than this value. If unspecified, at most + 100 participants are returned. The maximum value + is 250; values above 250 are coerced to 250. + Maximum might change in the future. + page_token (str): + Optional. Page token returned from previous + List Call. + filter (str): + Optional. User specified filtering condition in EBNF format. + The following are the filterable fields: + + - ``start_time`` + - ``end_time`` + + For example, ``end_time IS NULL`` returns active participant + sessions in the conference record. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListParticipantSessionsResponse(proto.Message): + r"""Response of ListParticipants method. + + Attributes: + participant_sessions (MutableSequence[google.apps.meet_v2beta.types.ParticipantSession]): + List of participants in one page. + next_page_token (str): + Token to be circulated back for further List + call if current List doesn't include all the + participants. Unset if all participants are + returned. + """ + + @property + def raw_page(self): + return self + + participant_sessions: MutableSequence[ + resource.ParticipantSession + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=resource.ParticipantSession, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetRecordingRequest(proto.Message): + r"""Request message for GetRecording method. + + Attributes: + name (str): + Required. Resource name of the recording. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListRecordingsRequest(proto.Message): + r"""Request for ListRecordings method. + + Attributes: + parent (str): + Required. Format: ``conferenceRecords/{conference_record}`` + page_size (int): + Maximum number of recordings to return. The + service might return fewer than this value. + If unspecified, at most 10 recordings are + returned. The maximum value is 100; values above + 100 are coerced to 100. Maximum might change in + the future. + page_token (str): + Page token returned from previous List Call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListRecordingsResponse(proto.Message): + r"""Response for ListRecordings method. + + Attributes: + recordings (MutableSequence[google.apps.meet_v2beta.types.Recording]): + List of recordings in one page. + next_page_token (str): + Token to be circulated back for further List + call if current List doesn't include all the + recordings. Unset if all recordings are + returned. 
+ """ + + @property + def raw_page(self): + return self + + recordings: MutableSequence[resource.Recording] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=resource.Recording, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetTranscriptRequest(proto.Message): + r"""Request for GetTranscript method. + + Attributes: + name (str): + Required. Resource name of the transcript. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTranscriptsRequest(proto.Message): + r"""Request for ListTranscripts method. + + Attributes: + parent (str): + Required. Format: ``conferenceRecords/{conference_record}`` + page_size (int): + Maximum number of transcripts to return. The + service might return fewer than this value. + If unspecified, at most 10 transcripts are + returned. The maximum value is 100; values above + 100 are coerced to 100. Maximum might change in + the future. + page_token (str): + Page token returned from previous List Call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListTranscriptsResponse(proto.Message): + r"""Response for ListTranscripts method. + + Attributes: + transcripts (MutableSequence[google.apps.meet_v2beta.types.Transcript]): + List of transcripts in one page. + next_page_token (str): + Token to be circulated back for further List + call if current List doesn't include all the + transcripts. Unset if all transcripts are + returned. + """ + + @property + def raw_page(self): + return self + + transcripts: MutableSequence[resource.Transcript] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=resource.Transcript, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetTranscriptEntryRequest(proto.Message): + r"""Request for GetTranscriptEntry method. + + Attributes: + name (str): + Required. Resource name of the ``TranscriptEntry``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTranscriptEntriesRequest(proto.Message): + r"""Request for ListTranscriptEntries method. + + Attributes: + parent (str): + Required. Format: + ``conferenceRecords/{conference_record}/transcripts/{transcript}`` + page_size (int): + Maximum number of entries to return. The + service might return fewer than this value. + If unspecified, at most 10 entries are returned. + The maximum value is 100; values above 100 are + coerced to 100. Maximum might change in the + future. + page_token (str): + Page token returned from previous List Call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListTranscriptEntriesResponse(proto.Message): + r"""Response for ListTranscriptEntries method + + Attributes: + transcript_entries (MutableSequence[google.apps.meet_v2beta.types.TranscriptEntry]): + List of TranscriptEntries in one page. + next_page_token (str): + Token to be circulated back for further List + call if current List doesn't include all the + transcript entries. Unset if all entries are + returned. 
+ """ + + @property + def raw_page(self): + return self + + transcript_entries: MutableSequence[resource.TranscriptEntry] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=resource.TranscriptEntry, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-apps-meet/mypy.ini b/packages/google-apps-meet/mypy.ini new file mode 100644 index 000000000000..574c5aed394b --- /dev/null +++ b/packages/google-apps-meet/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.7 +namespace_packages = True diff --git a/packages/google-apps-meet/noxfile.py b/packages/google-apps-meet/noxfile.py new file mode 100644 index 000000000000..7d3551347c78 --- /dev/null +++ b/packages/google-apps-meet/noxfile.py @@ -0,0 +1,410 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! + +from __future__ import absolute_import + +import os +import pathlib +import re +import shutil +from typing import Dict, List +import warnings + +import nox + +BLACK_VERSION = "black[jupyter]==23.7.0" +ISORT_VERSION = "isort==5.11.0" + +LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] + + +DEFAULT_PYTHON_VERSION = "3.10" + +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +UNIT_TEST_STANDARD_DEPENDENCIES = [ + "mock", + "asyncmock", + "pytest", + "pytest-cov", + "pytest-asyncio", +] +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8", "3.9", "3.10", "3.11", "3.12"] +SYSTEM_TEST_STANDARD_DEPENDENCIES = [ + "mock", + "pytest", + "google-cloud-testutils", +] +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit", + "system", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", +] + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *LINT_PATHS, + ) + + session.run("flake8", "google", "tests") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. 
Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def format(session): + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run( + "isort", + "--fss", + *LINT_PATHS, + ) + session.run( + "black", + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") + + +def install_unittest_dependencies(session, *constraints): + standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES + session.install(*standard_deps, *constraints) + + if UNIT_TEST_EXTERNAL_DEPENDENCIES: + warnings.warn( + "'unit_test_external_dependencies' is deprecated. Instead, please " + "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.", + DeprecationWarning, + ) + session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_LOCAL_DEPENDENCIES: + session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_EXTRAS_BY_PYTHON: + extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif UNIT_TEST_EXTRAS: + extras = UNIT_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + +def default(session): + # Install all test dependencies, then install this package in-place. + + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + + # Run py.test against the unit tests. + session.run( + "py.test", + "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", + "--cov=google", + "--cov=tests/unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + os.path.join("tests", "unit"), + *session.posargs, + ) + + +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) +def unit(session): + """Run the unit test suite.""" + default(session) + + +def install_systemtest_dependencies(session, *constraints): + # Use pre-release gRPC for system tests. + # Exclude version 1.52.0rc1 which has a known issue. 
+ # See https://github.com/grpc/grpc/issues/32163 + session.install("--pre", "grpcio!=1.52.0rc1") + + session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTERNAL_DEPENDENCIES: + session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_LOCAL_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTRAS_BY_PYTHON: + extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif SYSTEM_TEST_EXTRAS: + extras = SYSTEM_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def system(session): + """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") + # Install pyopenssl for mTLS testing. + if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": + session.install("pyopenssl") + + system_test_exists = os.path.exists(system_test_path) + system_test_folder_exists = os.path.exists(system_test_folder_path) + # Sanity check: only run tests if found. + if not system_test_exists and not system_test_folder_exists: + session.skip("System tests were not found") + + install_systemtest_dependencies(session, "-c", constraints_path) + + # Run py.test against the system tests. + if system_test_exists: + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) + if system_test_folder_exists: + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install( + "sphinx==4.5.0", + "alabaster", + "recommonmark", + ) + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docfx(session): + """Build the docfx yaml files for this library.""" + + session.install("-e", ".") + session.install( + "gcp-sphinx-docfx-yaml", + "alabaster", + "recommonmark", + ) + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python="3.12") +def prerelease_deps(session): + """Run all tests with prerelease versions of dependencies installed.""" + + # Install all dependencies + session.install("-e", ".[all, tests, tracing]") + unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES + session.install(*unit_deps_all) + system_deps_all = ( + SYSTEM_TEST_STANDARD_DEPENDENCIES + + SYSTEM_TEST_EXTERNAL_DEPENDENCIES + + SYSTEM_TEST_EXTRAS + ) + session.install(*system_deps_all) + + # Because we test minimum dependency versions on the minimum Python + # version, the first version we test with in the unit tests sessions has a + # constraints file containing all dependencies and extras. + with open( + CURRENT_DIRECTORY + / "testing" + / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt", + encoding="utf-8", + ) as constraints_file: + constraints_text = constraints_file.read() + + # Ignore leading whitespace and comment lines. + constraints_deps = [ + match.group(1) + for match in re.finditer( + r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE + ) + ] + + session.install(*constraints_deps) + + prerel_deps = [ + "protobuf", + # dependency of grpc + "six", + "googleapis-common-protos", + # Exclude version 1.52.0rc1 which has a known issue. 
See https://github.com/grpc/grpc/issues/32163 + "grpcio!=1.52.0rc1", + "grpcio-status", + "google-api-core", + "google-auth", + "proto-plus", + "google-cloud-testutils", + # dependencies of google-cloud-testutils" + "click", + ] + + for dep in prerel_deps: + session.install("--pre", "--no-deps", "--upgrade", dep) + + # Remaining dependencies + other_deps = [ + "requests", + ] + session.install(*other_deps) + + # Print out prerelease package versions + session.run( + "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" + ) + session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") + + session.run("py.test", "tests/unit") diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_async.py new file mode 100644 index 000000000000..4fccaa070268 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetConferenceRecord +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetConferenceRecord_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_conference_record(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetConferenceRecordRequest( + name="name_value", + ) + + # Make the request + response = await client.get_conference_record(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetConferenceRecord_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_sync.py new file mode 100644 index 000000000000..cb817f2458fb --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_conference_record_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetConferenceRecord +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetConferenceRecord_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
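
# Hand-written illustrative sketch (not generated code from this patch): pairs the
# GetConferenceRecord snippet above with the filter syntax documented on
# ListConferenceRecordsRequest in types/service.py. The list_conference_records
# method and its async pager are assumed from the usual GAPIC conventions;
# verify against the published library.
from google.apps import meet_v2beta


async def sample_list_conference_records_for_meeting_code():
    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()

    # Filter example taken from the request docstring in this patch.
    request = meet_v2beta.ListConferenceRecordsRequest(
        filter='space.meeting_code = "abc-mnop-xyz"',
    )

    # The call returns a pager; iterating it fetches further pages as needed.
    pager = await client.list_conference_records(request=request)
    async for conference_record in pager:
        print(conference_record.name)
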
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_conference_record(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetConferenceRecordRequest( + name="name_value", + ) + + # Make the request + response = client.get_conference_record(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetConferenceRecord_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_async.py new file mode 100644 index 000000000000..fb8ec9fa0bfd --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetParticipant +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetParticipant_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_participant(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantRequest( + name="name_value", + ) + + # Make the request + response = await client.get_participant(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetParticipant_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_async.py new file mode 100644 index 000000000000..9cf6b1b59bcf --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetParticipantSession +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetParticipantSession_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_participant_session(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantSessionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_participant_session(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetParticipantSession_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_sync.py new file mode 100644 index 000000000000..9e74f86a34ed --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_session_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetParticipantSession +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetParticipantSession_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_participant_session(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantSessionRequest( + name="name_value", + ) + + # Make the request + response = client.get_participant_session(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetParticipantSession_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_sync.py new file mode 100644 index 000000000000..696cf2fe7691 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_participant_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetParticipant +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetParticipant_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
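
# Hand-written illustrative sketch (not generated code from this patch): uses the
# "end_time IS NULL" filter documented on ListParticipantSessionsRequest in
# types/service.py to find a participant's active sessions. The
# list_participant_sessions method and its pager are assumed from the usual
# GAPIC conventions.
from google.apps import meet_v2beta


def sample_list_active_participant_sessions(participant_name: str) -> None:
    # participant_name is expected in the form documented on Participant:
    # conferenceRecords/{conference_record}/participants/{participant}
    client = meet_v2beta.ConferenceRecordsServiceClient()

    request = meet_v2beta.ListParticipantSessionsRequest(
        parent=participant_name,
        filter="end_time IS NULL",
    )

    for participant_session in client.list_participant_sessions(request=request):
        print(participant_session.name)
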
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_participant(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetParticipantRequest( + name="name_value", + ) + + # Make the request + response = client.get_participant(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetParticipant_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_async.py new file mode 100644 index 000000000000..2793c766b8ca --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetRecording +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetRecording_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_recording(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetRecordingRequest( + name="name_value", + ) + + # Make the request + response = await client.get_recording(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetRecording_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_sync.py new file mode 100644 index 000000000000..0c835e68f7d4 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_recording_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetRecording +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetRecording_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
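
# Hand-written illustrative sketch (not generated code from this patch): after
# fetching a Recording as in the snippet above, its drive_destination.file can be
# downloaded with the Drive files.get endpoint quoted in the DriveDestination
# docstring (alt=media). Uses google-auth's AuthorizedSession; the Drive scope
# and the lack of retry handling are assumptions, not part of this library.
import google.auth
from google.auth.transport.requests import AuthorizedSession


def download_recording_blob(file_id: str, out_path: str) -> None:
    credentials, _ = google.auth.default(
        scopes=["https://www.googleapis.com/auth/drive.readonly"]
    )
    session = AuthorizedSession(credentials)

    # Endpoint taken from the DriveDestination docstring in this patch.
    url = f"https://www.googleapis.com/drive/v3/files/{file_id}?alt=media"
    response = session.get(url)
    response.raise_for_status()

    with open(out_path, "wb") as blob:
        blob.write(response.content)
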
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_recording(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetRecordingRequest( + name="name_value", + ) + + # Make the request + response = client.get_recording(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetRecording_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_async.py new file mode 100644 index 000000000000..aa98597fa09a --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTranscript +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetTranscript_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_transcript(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptRequest( + name="name_value", + ) + + # Make the request + response = await client.get_transcript(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetTranscript_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_async.py new file mode 100644 index 000000000000..b80da75d6787 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTranscriptEntry +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetTranscriptEntry_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_transcript_entry(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptEntryRequest( + name="name_value", + ) + + # Make the request + response = await client.get_transcript_entry(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetTranscriptEntry_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_sync.py new file mode 100644 index 000000000000..5f7ebf08f585 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_entry_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTranscriptEntry +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetTranscriptEntry_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_transcript_entry(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptEntryRequest( + name="name_value", + ) + + # Make the request + response = client.get_transcript_entry(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetTranscriptEntry_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_sync.py new file mode 100644 index 000000000000..31a589c58369 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_get_transcript_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTranscript +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_GetTranscript_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_transcript(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetTranscriptRequest( + name="name_value", + ) + + # Make the request + response = client.get_transcript(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_GetTranscript_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_async.py new file mode 100644 index 000000000000..4a1c17b25279 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListConferenceRecords +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_ListConferenceRecords_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.apps import meet_v2beta
+
+
+async def sample_list_conference_records():
+    # Create a client
+    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = meet_v2beta.ListConferenceRecordsRequest(
+    )
+
+    # Make the request
+    page_result = await client.list_conference_records(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END meet_v2beta_generated_ConferenceRecordsService_ListConferenceRecords_async]
diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_sync.py
new file mode 100644
index 000000000000..60390f1f158c
--- /dev/null
+++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_conference_records_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListConferenceRecords
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-apps-meet
+
+
+# [START meet_v2beta_generated_ConferenceRecordsService_ListConferenceRecords_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_list_conference_records(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListConferenceRecordsRequest( + ) + + # Make the request + page_result = client.list_conference_records(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_ListConferenceRecords_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_async.py new file mode 100644 index 000000000000..7cfbd2ad6152 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListParticipantSessions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_ListParticipantSessions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.apps import meet_v2beta
+
+
+async def sample_list_participant_sessions():
+    # Create a client
+    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = meet_v2beta.ListParticipantSessionsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_participant_sessions(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END meet_v2beta_generated_ConferenceRecordsService_ListParticipantSessions_async]
diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_sync.py
new file mode 100644
index 000000000000..3d7a519c4bd7
--- /dev/null
+++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participant_sessions_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListParticipantSessions
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-apps-meet
+
+
+# [START meet_v2beta_generated_ConferenceRecordsService_ListParticipantSessions_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_list_participant_sessions(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListParticipantSessionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_participant_sessions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_ListParticipantSessions_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_async.py new file mode 100644 index 000000000000..c3ab2cac72f7 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListParticipants +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_ListParticipants_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.apps import meet_v2beta
+
+
+async def sample_list_participants():
+    # Create a client
+    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = meet_v2beta.ListParticipantsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_participants(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END meet_v2beta_generated_ConferenceRecordsService_ListParticipants_async]
diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_sync.py
new file mode 100644
index 000000000000..8dc1c2162d3f
--- /dev/null
+++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_participants_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListParticipants
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-apps-meet
+
+
+# [START meet_v2beta_generated_ConferenceRecordsService_ListParticipants_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_list_participants(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListParticipantsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_participants(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_ListParticipants_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_async.py new file mode 100644 index 000000000000..98abc3b31337 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListRecordings +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_ListRecordings_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.apps import meet_v2beta
+
+
+async def sample_list_recordings():
+    # Create a client
+    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = meet_v2beta.ListRecordingsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_recordings(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END meet_v2beta_generated_ConferenceRecordsService_ListRecordings_async]
diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_sync.py
new file mode 100644
index 000000000000..9ebf7e1865ef
--- /dev/null
+++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_recordings_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListRecordings
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-apps-meet
+
+
+# [START meet_v2beta_generated_ConferenceRecordsService_ListRecordings_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_list_recordings(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListRecordingsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_recordings(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_ListRecordings_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_async.py new file mode 100644 index 000000000000..18fd6ace6c0e --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTranscriptEntries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_ListTranscriptEntries_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.apps import meet_v2beta
+
+
+async def sample_list_transcript_entries():
+    # Create a client
+    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = meet_v2beta.ListTranscriptEntriesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_transcript_entries(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END meet_v2beta_generated_ConferenceRecordsService_ListTranscriptEntries_async]
diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_sync.py
new file mode 100644
index 000000000000..6695bc61d565
--- /dev/null
+++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcript_entries_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTranscriptEntries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-apps-meet
+
+
+# [START meet_v2beta_generated_ConferenceRecordsService_ListTranscriptEntries_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_list_transcript_entries(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListTranscriptEntriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_transcript_entries(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_ListTranscriptEntries_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_async.py new file mode 100644 index 000000000000..87d34aedfe03 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTranscripts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_ConferenceRecordsService_ListTranscripts_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.apps import meet_v2beta
+
+
+async def sample_list_transcripts():
+    # Create a client
+    client = meet_v2beta.ConferenceRecordsServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = meet_v2beta.ListTranscriptsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_transcripts(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END meet_v2beta_generated_ConferenceRecordsService_ListTranscripts_async]
diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_sync.py
new file mode 100644
index 000000000000..cff2819c2311
--- /dev/null
+++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_conference_records_service_list_transcripts_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTranscripts
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-apps-meet
+
+
+# [START meet_v2beta_generated_ConferenceRecordsService_ListTranscripts_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_list_transcripts(): + # Create a client + client = meet_v2beta.ConferenceRecordsServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.ListTranscriptsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_transcripts(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END meet_v2beta_generated_ConferenceRecordsService_ListTranscripts_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_async.py new file mode 100644 index 000000000000..cff28d9b2765 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSpace +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_CreateSpace_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_create_space(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.CreateSpaceRequest( + ) + + # Make the request + response = await client.create_space(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_SpacesService_CreateSpace_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_sync.py new file mode 100644 index 000000000000..bc898f509d63 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_create_space_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSpace +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_CreateSpace_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_create_space(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.CreateSpaceRequest( + ) + + # Make the request + response = client.create_space(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_SpacesService_CreateSpace_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_async.py new file mode 100644 index 000000000000..bf678fa8854d --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EndActiveConference +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_EndActiveConference_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_end_active_conference(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.EndActiveConferenceRequest( + name="name_value", + ) + + # Make the request + await client.end_active_conference(request=request) + + +# [END meet_v2beta_generated_SpacesService_EndActiveConference_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_sync.py new file mode 100644 index 000000000000..cfb07ebc571f --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_end_active_conference_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EndActiveConference +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_EndActiveConference_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_end_active_conference(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.EndActiveConferenceRequest( + name="name_value", + ) + + # Make the request + client.end_active_conference(request=request) + + +# [END meet_v2beta_generated_SpacesService_EndActiveConference_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_async.py new file mode 100644 index 000000000000..4bf3d9367517 --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpace +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_GetSpace_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_get_space(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.GetSpaceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_space(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_SpacesService_GetSpace_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_sync.py new file mode 100644 index 000000000000..183d8644ed1d --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_get_space_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpace +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_GetSpace_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_get_space(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.GetSpaceRequest( + name="name_value", + ) + + # Make the request + response = client.get_space(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_SpacesService_GetSpace_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_async.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_async.py new file mode 100644 index 000000000000..da7ac92ed24c --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpace +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_UpdateSpace_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +async def sample_update_space(): + # Create a client + client = meet_v2beta.SpacesServiceAsyncClient() + + # Initialize request argument(s) + request = meet_v2beta.UpdateSpaceRequest( + ) + + # Make the request + response = await client.update_space(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_SpacesService_UpdateSpace_async] diff --git a/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_sync.py b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_sync.py new file mode 100644 index 000000000000..7a85ea790c3a --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/meet_v2beta_generated_spaces_service_update_space_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpace +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-apps-meet + + +# [START meet_v2beta_generated_SpacesService_UpdateSpace_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.apps import meet_v2beta + + +def sample_update_space(): + # Create a client + client = meet_v2beta.SpacesServiceClient() + + # Initialize request argument(s) + request = meet_v2beta.UpdateSpaceRequest( + ) + + # Make the request + response = client.update_space(request=request) + + # Handle the response + print(response) + +# [END meet_v2beta_generated_SpacesService_UpdateSpace_sync] diff --git a/packages/google-apps-meet/samples/generated_samples/snippet_metadata_google.apps.meet.v2beta.json b/packages/google-apps-meet/samples/generated_samples/snippet_metadata_google.apps.meet.v2beta.json new file mode 100644 index 000000000000..bbee1583d40d --- /dev/null +++ b/packages/google-apps-meet/samples/generated_samples/snippet_metadata_google.apps.meet.v2beta.json @@ -0,0 +1,2585 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.apps.meet.v2beta", + "version": "v2beta" + } + ], + "language": "PYTHON", + "name": "google-apps-meet", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.get_conference_record", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetConferenceRecord", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetConferenceRecord" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetConferenceRecordRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.ConferenceRecord", + "shortName": "get_conference_record" + }, + "description": "Sample for GetConferenceRecord", + "file": "meet_v2beta_generated_conference_records_service_get_conference_record_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetConferenceRecord_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_conference_record_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.get_conference_record", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetConferenceRecord", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetConferenceRecord" + 
}, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetConferenceRecordRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.ConferenceRecord", + "shortName": "get_conference_record" + }, + "description": "Sample for GetConferenceRecord", + "file": "meet_v2beta_generated_conference_records_service_get_conference_record_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetConferenceRecord_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_conference_record_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.get_participant_session", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetParticipantSession", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetParticipantSession" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetParticipantSessionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.ParticipantSession", + "shortName": "get_participant_session" + }, + "description": "Sample for GetParticipantSession", + "file": "meet_v2beta_generated_conference_records_service_get_participant_session_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetParticipantSession_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_participant_session_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.get_participant_session", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetParticipantSession", + "service": { + "fullName": 
"google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetParticipantSession" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetParticipantSessionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.ParticipantSession", + "shortName": "get_participant_session" + }, + "description": "Sample for GetParticipantSession", + "file": "meet_v2beta_generated_conference_records_service_get_participant_session_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetParticipantSession_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_participant_session_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.get_participant", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetParticipant", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetParticipant" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetParticipantRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Participant", + "shortName": "get_participant" + }, + "description": "Sample for GetParticipant", + "file": "meet_v2beta_generated_conference_records_service_get_participant_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetParticipant_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_participant_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.get_participant", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetParticipant", + 
"service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetParticipant" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetParticipantRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Participant", + "shortName": "get_participant" + }, + "description": "Sample for GetParticipant", + "file": "meet_v2beta_generated_conference_records_service_get_participant_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetParticipant_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_participant_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.get_recording", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetRecording", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetRecording" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetRecordingRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Recording", + "shortName": "get_recording" + }, + "description": "Sample for GetRecording", + "file": "meet_v2beta_generated_conference_records_service_get_recording_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetRecording_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_recording_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.get_recording", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetRecording", + "service": { + "fullName": 
"google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetRecording" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetRecordingRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Recording", + "shortName": "get_recording" + }, + "description": "Sample for GetRecording", + "file": "meet_v2beta_generated_conference_records_service_get_recording_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetRecording_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_recording_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.get_transcript_entry", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetTranscriptEntry", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetTranscriptEntry" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetTranscriptEntryRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.TranscriptEntry", + "shortName": "get_transcript_entry" + }, + "description": "Sample for GetTranscriptEntry", + "file": "meet_v2beta_generated_conference_records_service_get_transcript_entry_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetTranscriptEntry_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_transcript_entry_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.get_transcript_entry", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetTranscriptEntry", + "service": { + 
"fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetTranscriptEntry" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetTranscriptEntryRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.TranscriptEntry", + "shortName": "get_transcript_entry" + }, + "description": "Sample for GetTranscriptEntry", + "file": "meet_v2beta_generated_conference_records_service_get_transcript_entry_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetTranscriptEntry_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_transcript_entry_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.get_transcript", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetTranscript", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetTranscript" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetTranscriptRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Transcript", + "shortName": "get_transcript" + }, + "description": "Sample for GetTranscript", + "file": "meet_v2beta_generated_conference_records_service_get_transcript_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetTranscript_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_transcript_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.get_transcript", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.GetTranscript", + "service": { + "fullName": 
"google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "GetTranscript" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetTranscriptRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Transcript", + "shortName": "get_transcript" + }, + "description": "Sample for GetTranscript", + "file": "meet_v2beta_generated_conference_records_service_get_transcript_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_GetTranscript_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_get_transcript_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.list_conference_records", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListConferenceRecords", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListConferenceRecords" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListConferenceRecordsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListConferenceRecordsAsyncPager", + "shortName": "list_conference_records" + }, + "description": "Sample for ListConferenceRecords", + "file": "meet_v2beta_generated_conference_records_service_list_conference_records_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListConferenceRecords_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_conference_records_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.list_conference_records", + "method": { + "fullName": 
"google.apps.meet.v2beta.ConferenceRecordsService.ListConferenceRecords", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListConferenceRecords" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListConferenceRecordsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListConferenceRecordsPager", + "shortName": "list_conference_records" + }, + "description": "Sample for ListConferenceRecords", + "file": "meet_v2beta_generated_conference_records_service_list_conference_records_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListConferenceRecords_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_conference_records_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.list_participant_sessions", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListParticipantSessions", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListParticipantSessions" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListParticipantSessionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantSessionsAsyncPager", + "shortName": "list_participant_sessions" + }, + "description": "Sample for ListParticipantSessions", + "file": "meet_v2beta_generated_conference_records_service_list_participant_sessions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListParticipantSessions_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_participant_sessions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.list_participant_sessions", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListParticipantSessions", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListParticipantSessions" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListParticipantSessionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantSessionsPager", + "shortName": "list_participant_sessions" + }, + "description": "Sample for ListParticipantSessions", + "file": "meet_v2beta_generated_conference_records_service_list_participant_sessions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListParticipantSessions_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_participant_sessions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.list_participants", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListParticipants", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListParticipants" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListParticipantsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantsAsyncPager", + "shortName": "list_participants" + }, + "description": "Sample for ListParticipants", + "file": "meet_v2beta_generated_conference_records_service_list_participants_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListParticipants_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_participants_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.list_participants", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListParticipants", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListParticipants" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListParticipantsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListParticipantsPager", + "shortName": "list_participants" + }, + "description": "Sample for ListParticipants", + "file": "meet_v2beta_generated_conference_records_service_list_participants_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListParticipants_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_participants_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.list_recordings", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListRecordings", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListRecordings" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListRecordingsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListRecordingsAsyncPager", + "shortName": "list_recordings" + }, + "description": "Sample for ListRecordings", + "file": "meet_v2beta_generated_conference_records_service_list_recordings_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListRecordings_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + 
"start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_recordings_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.list_recordings", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListRecordings", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListRecordings" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListRecordingsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListRecordingsPager", + "shortName": "list_recordings" + }, + "description": "Sample for ListRecordings", + "file": "meet_v2beta_generated_conference_records_service_list_recordings_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListRecordings_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_recordings_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.list_transcript_entries", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListTranscriptEntries", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListTranscriptEntries" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListTranscriptEntriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptEntriesAsyncPager", + "shortName": "list_transcript_entries" + }, + "description": "Sample for ListTranscriptEntries", + "file": "meet_v2beta_generated_conference_records_service_list_transcript_entries_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListTranscriptEntries_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, 
+ "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_transcript_entries_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.list_transcript_entries", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListTranscriptEntries", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListTranscriptEntries" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListTranscriptEntriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptEntriesPager", + "shortName": "list_transcript_entries" + }, + "description": "Sample for ListTranscriptEntries", + "file": "meet_v2beta_generated_conference_records_service_list_transcript_entries_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListTranscriptEntries_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_transcript_entries_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient", + "shortName": "ConferenceRecordsServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceAsyncClient.list_transcripts", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListTranscripts", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListTranscripts" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListTranscriptsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptsAsyncPager", + "shortName": "list_transcripts" + }, + "description": "Sample for ListTranscripts", + "file": "meet_v2beta_generated_conference_records_service_list_transcripts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListTranscripts_async", + 
"segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_transcripts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient", + "shortName": "ConferenceRecordsServiceClient" + }, + "fullName": "google.apps.meet_v2beta.ConferenceRecordsServiceClient.list_transcripts", + "method": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService.ListTranscripts", + "service": { + "fullName": "google.apps.meet.v2beta.ConferenceRecordsService", + "shortName": "ConferenceRecordsService" + }, + "shortName": "ListTranscripts" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.ListTranscriptsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.services.conference_records_service.pagers.ListTranscriptsPager", + "shortName": "list_transcripts" + }, + "description": "Sample for ListTranscripts", + "file": "meet_v2beta_generated_conference_records_service_list_transcripts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_ConferenceRecordsService_ListTranscripts_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_conference_records_service_list_transcripts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient", + "shortName": "SpacesServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient.create_space", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.CreateSpace", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "CreateSpace" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.CreateSpaceRequest" + }, + { + "name": "space", + "type": "google.apps.meet_v2beta.types.Space" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Space", + "shortName": "create_space" + }, + "description": "Sample for CreateSpace", + "file": "meet_v2beta_generated_spaces_service_create_space_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_CreateSpace_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" 
+ }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_create_space_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceClient", + "shortName": "SpacesServiceClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceClient.create_space", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.CreateSpace", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "CreateSpace" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.CreateSpaceRequest" + }, + { + "name": "space", + "type": "google.apps.meet_v2beta.types.Space" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Space", + "shortName": "create_space" + }, + "description": "Sample for CreateSpace", + "file": "meet_v2beta_generated_spaces_service_create_space_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_CreateSpace_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_create_space_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient", + "shortName": "SpacesServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient.end_active_conference", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.EndActiveConference", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "EndActiveConference" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.EndActiveConferenceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "end_active_conference" + }, + "description": "Sample for EndActiveConference", + "file": "meet_v2beta_generated_spaces_service_end_active_conference_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_EndActiveConference_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + 
{ + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_end_active_conference_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceClient", + "shortName": "SpacesServiceClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceClient.end_active_conference", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.EndActiveConference", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "EndActiveConference" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.EndActiveConferenceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "end_active_conference" + }, + "description": "Sample for EndActiveConference", + "file": "meet_v2beta_generated_spaces_service_end_active_conference_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_EndActiveConference_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_end_active_conference_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient", + "shortName": "SpacesServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient.get_space", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.GetSpace", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "GetSpace" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetSpaceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Space", + "shortName": "get_space" + }, + "description": "Sample for GetSpace", + "file": "meet_v2beta_generated_spaces_service_get_space_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_GetSpace_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_get_space_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceClient", + "shortName": "SpacesServiceClient" + }, + "fullName": 
"google.apps.meet_v2beta.SpacesServiceClient.get_space", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.GetSpace", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "GetSpace" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.GetSpaceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Space", + "shortName": "get_space" + }, + "description": "Sample for GetSpace", + "file": "meet_v2beta_generated_spaces_service_get_space_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_GetSpace_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_get_space_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient", + "shortName": "SpacesServiceAsyncClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceAsyncClient.update_space", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.UpdateSpace", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", + "shortName": "SpacesService" + }, + "shortName": "UpdateSpace" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.UpdateSpaceRequest" + }, + { + "name": "space", + "type": "google.apps.meet_v2beta.types.Space" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Space", + "shortName": "update_space" + }, + "description": "Sample for UpdateSpace", + "file": "meet_v2beta_generated_spaces_service_update_space_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_UpdateSpace_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_update_space_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.apps.meet_v2beta.SpacesServiceClient", + "shortName": "SpacesServiceClient" + }, + "fullName": "google.apps.meet_v2beta.SpacesServiceClient.update_space", + "method": { + "fullName": "google.apps.meet.v2beta.SpacesService.UpdateSpace", + "service": { + "fullName": "google.apps.meet.v2beta.SpacesService", 
+ "shortName": "SpacesService" + }, + "shortName": "UpdateSpace" + }, + "parameters": [ + { + "name": "request", + "type": "google.apps.meet_v2beta.types.UpdateSpaceRequest" + }, + { + "name": "space", + "type": "google.apps.meet_v2beta.types.Space" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.apps.meet_v2beta.types.Space", + "shortName": "update_space" + }, + "description": "Sample for UpdateSpace", + "file": "meet_v2beta_generated_spaces_service_update_space_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "meet_v2beta_generated_SpacesService_UpdateSpace_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "meet_v2beta_generated_spaces_service_update_space_sync.py" + } + ] +} diff --git a/packages/google-apps-meet/scripts/decrypt-secrets.sh b/packages/google-apps-meet/scripts/decrypt-secrets.sh new file mode 100755 index 000000000000..0018b421ddf8 --- /dev/null +++ b/packages/google-apps-meet/scripts/decrypt-secrets.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ROOT=$( dirname "$DIR" ) + +# Work from the project root. +cd $ROOT + +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. +if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." + exit 1 +fi + +# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. 
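+# Example (hypothetical project ID): to pull secrets from your own project instead, run
+#   SECRET_MANAGER_PROJECT=my-test-project bash scripts/decrypt-secrets.sh
+# The ${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources} expansion below falls
+# back to the shared default whenever the variable is unset or empty.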
+PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" + +gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ + > testing/test-env.sh +gcloud secrets versions access latest \ + --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ + > testing/service-account.json +gcloud secrets versions access latest \ + --secret="python-docs-samples-client-secrets" \ + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/packages/google-apps-meet/scripts/fixup_meet_v2beta_keywords.py b/packages/google-apps-meet/scripts/fixup_meet_v2beta_keywords.py new file mode 100644 index 000000000000..147ce8a1274f --- /dev/null +++ b/packages/google-apps-meet/scripts/fixup_meet_v2beta_keywords.py @@ -0,0 +1,191 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class meetCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'create_space': ('space', ), + 'end_active_conference': ('name', ), + 'get_conference_record': ('name', ), + 'get_participant': ('name', ), + 'get_participant_session': ('name', ), + 'get_recording': ('name', ), + 'get_space': ('name', ), + 'get_transcript': ('name', ), + 'get_transcript_entry': ('name', ), + 'list_conference_records': ('page_size', 'page_token', 'filter', ), + 'list_participants': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_participant_sessions': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_recordings': ('parent', 'page_size', 'page_token', ), + 'list_transcript_entries': ('parent', 'page_size', 'page_token', ), + 'list_transcripts': ('parent', 'page_size', 'page_token', ), + 'update_space': ('space', 'update_mask', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=meetCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the meet client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
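The description above spells out what fixup_meet_v2beta_keywords.py does and where it falls short. In practice the transformer turns an old positional call such as client.get_space("spaces/abc") into client.get_space(request={'name': "spaces/abc"}), keeping retry/timeout/metadata as ordinary keyword arguments. A hedged, self-contained way to try it on a throwaway tree, assuming the script sits at scripts/fixup_meet_v2beta_keywords.py and libcst is installed:

# Run the fixup script over a temporary source tree and print the rewritten file.
import pathlib
import subprocess
import tempfile

src = 'client.get_space("spaces/abc-mnop-xyz", timeout=30.0)\n'
with tempfile.TemporaryDirectory() as old_dir, tempfile.TemporaryDirectory() as new_dir:
    pathlib.Path(old_dir, "snippet.py").write_text(src)
    subprocess.run(
        [
            "python", "scripts/fixup_meet_v2beta_keywords.py",
            "--input-directory", old_dir,
            "--output-directory", new_dir,
        ],
        check=True,
    )
    # Expected rewrite (modulo whitespace):
    # client.get_space(request={'name': "spaces/abc-mnop-xyz"}, timeout=30.0)
    print(pathlib.Path(new_dir, "snippet.py").read_text())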
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-apps-meet/setup.py b/packages/google-apps-meet/setup.py new file mode 100644 index 000000000000..73445f028822 --- /dev/null +++ b/packages/google-apps-meet/setup.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import re + +import setuptools # type: ignore + +package_root = os.path.abspath(os.path.dirname(__file__)) + +name = "google-apps-meet" + + +description = "Google Apps Meet API client library" + +version = None + +with open(os.path.join(package_root, "google/apps/meet/gapic_version.py")) as fp: + version_candidates = re.findall(r"(?<=\")\d+.\d+.\d+(?=\")", fp.read()) + assert len(version_candidates) == 1 + version = version_candidates[0] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + +dependencies = [ + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "proto-plus >= 1.22.3, <2.0.0dev", + "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", +] +url = "https://github.com/googleapis/google-cloud-python/tree/main/packages/google-apps-meet" + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +packages = [ + package + for package in setuptools.find_namespace_packages() + if package.startswith("google") +] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url=url, + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + 
"Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + python_requires=">=3.7", + install_requires=dependencies, + include_package_data=True, + zip_safe=False, +) diff --git a/packages/google-apps-meet/testing/.gitignore b/packages/google-apps-meet/testing/.gitignore new file mode 100644 index 000000000000..b05fbd630881 --- /dev/null +++ b/packages/google-apps-meet/testing/.gitignore @@ -0,0 +1,3 @@ +test-env.sh +service-account.json +client-secrets.json \ No newline at end of file diff --git a/packages/google-apps-meet/testing/constraints-3.10.txt b/packages/google-apps-meet/testing/constraints-3.10.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/packages/google-apps-meet/testing/constraints-3.10.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/packages/google-apps-meet/testing/constraints-3.11.txt b/packages/google-apps-meet/testing/constraints-3.11.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/packages/google-apps-meet/testing/constraints-3.11.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/packages/google-apps-meet/testing/constraints-3.12.txt b/packages/google-apps-meet/testing/constraints-3.12.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/packages/google-apps-meet/testing/constraints-3.12.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/packages/google-apps-meet/testing/constraints-3.7.txt b/packages/google-apps-meet/testing/constraints-3.7.txt new file mode 100644 index 000000000000..185f7d366c2f --- /dev/null +++ b/packages/google-apps-meet/testing/constraints-3.7.txt @@ -0,0 +1,9 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List all library dependencies and extras in this file. +# Pin the version to the lower bound. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.0 +proto-plus==1.22.3 +protobuf==3.19.5 diff --git a/packages/google-apps-meet/testing/constraints-3.8.txt b/packages/google-apps-meet/testing/constraints-3.8.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/packages/google-apps-meet/testing/constraints-3.8.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/packages/google-apps-meet/testing/constraints-3.9.txt b/packages/google-apps-meet/testing/constraints-3.9.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/packages/google-apps-meet/testing/constraints-3.9.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. 
+# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/packages/google-apps-meet/tests/__init__.py b/packages/google-apps-meet/tests/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-apps-meet/tests/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-apps-meet/tests/unit/__init__.py b/packages/google-apps-meet/tests/unit/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-apps-meet/tests/unit/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-apps-meet/tests/unit/gapic/__init__.py b/packages/google-apps-meet/tests/unit/gapic/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-apps-meet/tests/unit/gapic/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/__init__.py b/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_conference_records_service.py b/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_conference_records_service.py new file mode 100644 index 000000000000..e99c56a7b750 --- /dev/null +++ b/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_conference_records_service.py @@ -0,0 +1,9169 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.oauth2 import service_account +from google.protobuf import json_format +from google.protobuf import timestamp_pb2 # type: ignore +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.apps.meet_v2beta.services.conference_records_service import ( + ConferenceRecordsServiceAsyncClient, + ConferenceRecordsServiceClient, + pagers, + transports, +) +from google.apps.meet_v2beta.types import resource, service + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
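The tests that follow start with ConferenceRecordsServiceClient._get_default_mtls_endpoint, which is expected to turn example.googleapis.com into example.mtls.googleapis.com, keep already-mTLS and sandbox endpoints consistent, and leave non-googleapis hosts untouched. The snippet below is an illustration of that expected mapping only, not the client's actual implementation.

# Illustrative only: the endpoint mapping the _get_default_mtls_endpoint
# assertions below expect; the real client derives it internally.
def expected_mtls_endpoint(api_endpoint):
    if api_endpoint is None or not api_endpoint.endswith(".googleapis.com"):
        return api_endpoint
    if api_endpoint.endswith(".mtls.googleapis.com"):
        return api_endpoint
    host = api_endpoint[: -len(".googleapis.com")]
    first, _, rest = host.partition(".")
    suffix = f".{rest}" if rest else ""
    return f"{first}.mtls{suffix}.googleapis.com"


assert expected_mtls_endpoint("example.googleapis.com") == "example.mtls.googleapis.com"
assert (
    expected_mtls_endpoint("example.sandbox.googleapis.com")
    == "example.mtls.sandbox.googleapis.com"
)
assert expected_mtls_endpoint("api.example.com") == "api.example.com"
assert expected_mtls_endpoint(None) is None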
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ConferenceRecordsServiceClient._get_default_mtls_endpoint(None) is None + assert ( + ConferenceRecordsServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ConferenceRecordsServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ConferenceRecordsServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ConferenceRecordsServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ConferenceRecordsServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ConferenceRecordsServiceClient, "grpc"), + (ConferenceRecordsServiceAsyncClient, "grpc_asyncio"), + (ConferenceRecordsServiceClient, "rest"), + ], +) +def test_conference_records_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "meet.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://meet.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ConferenceRecordsServiceGrpcTransport, "grpc"), + (transports.ConferenceRecordsServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.ConferenceRecordsServiceRestTransport, "rest"), + ], +) +def test_conference_records_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ConferenceRecordsServiceClient, "grpc"), + (ConferenceRecordsServiceAsyncClient, "grpc_asyncio"), + (ConferenceRecordsServiceClient, "rest"), + ], +) +def test_conference_records_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + 
assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "meet.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://meet.googleapis.com" + ) + + +def test_conference_records_service_client_get_transport_class(): + transport = ConferenceRecordsServiceClient.get_transport_class() + available_transports = [ + transports.ConferenceRecordsServiceGrpcTransport, + transports.ConferenceRecordsServiceRestTransport, + ] + assert transport in available_transports + + transport = ConferenceRecordsServiceClient.get_transport_class("grpc") + assert transport == transports.ConferenceRecordsServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + "grpc", + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + ConferenceRecordsServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConferenceRecordsServiceClient), +) +@mock.patch.object( + ConferenceRecordsServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConferenceRecordsServiceAsyncClient), +) +def test_conference_records_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + ConferenceRecordsServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + ConferenceRecordsServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
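Outside the mocked tests, the constructor path exercised by from_service_account_file/_json looks roughly like the sketch below; the key-file path is a placeholder, and the host check mirrors the meet.googleapis.com assertions above.

# Hedged sketch of building a client from a service-account key file.
from google.apps.meet_v2beta.services.conference_records_service import (
    ConferenceRecordsServiceClient,
)

client = ConferenceRecordsServiceClient.from_service_account_file(
    "path/to/service-account.json"  # placeholder path
)
# The default transport host resolves to meet.googleapis.com
# (meet.googleapis.com:443 for gRPC), as the tests above assert.
print(client.transport._host)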
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + "grpc", + "true", + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + "grpc", + "false", + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + 
ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceRestTransport, + "rest", + "true", + ), + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + ConferenceRecordsServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConferenceRecordsServiceClient), +) +@mock.patch.object( + ConferenceRecordsServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConferenceRecordsServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_conference_records_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", + [ConferenceRecordsServiceClient, ConferenceRecordsServiceAsyncClient], +) +@mock.patch.object( + ConferenceRecordsServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConferenceRecordsServiceClient), +) +@mock.patch.object( + ConferenceRecordsServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConferenceRecordsServiceAsyncClient), +) +def test_conference_records_service_client_get_mtls_endpoint_and_cert_source( + client_class, +): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
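Because get_mtls_endpoint_and_cert_source is a public classmethod, the behavior these environment-variable cases pin down can also be probed directly; a hedged example, best run in a clean process since it mutates os.environ:

# Query the endpoint/cert decision the tests above drive through env vars.
import os

from google.apps.meet_v2beta.services.conference_records_service import (
    ConferenceRecordsServiceClient,
)

os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"
endpoint, cert_source = (
    ConferenceRecordsServiceClient.get_mtls_endpoint_and_cert_source()
)
print(endpoint)      # expected: the plain DEFAULT_ENDPOINT (meet.googleapis.com)
print(cert_source)   # expected: None when no client certificate is configured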
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + "grpc", + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceRestTransport, + "rest", + ), + ], +) +def test_conference_records_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceRestTransport, + "rest", + None, + ), + ], +) +def test_conference_records_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
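Outside the mock, the credentials_file client option exercised by the surrounding test corresponds to this plain usage, where the JSON path is a placeholder for a real service-account key file:

# Hedged sketch of the credentials_file client option.
from google.api_core import client_options

from google.apps.meet_v2beta.services.conference_records_service import (
    ConferenceRecordsServiceClient,
)

options = client_options.ClientOptions(credentials_file="credentials.json")  # placeholder
client = ConferenceRecordsServiceClient(client_options=options)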
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_conference_records_service_client_client_options_from_dict(): + with mock.patch( + "google.apps.meet_v2beta.services.conference_records_service.transports.ConferenceRecordsServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ConferenceRecordsServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_conference_records_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "meet.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="meet.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetConferenceRecordRequest, + dict, + ], +) +def test_get_conference_record(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.ConferenceRecord( + name="name_value", + space="space_value", + ) + response = client.get_conference_record(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetConferenceRecordRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.ConferenceRecord) + assert response.name == "name_value" + assert response.space == "space_value" + + +def test_get_conference_record_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + client.get_conference_record() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetConferenceRecordRequest() + + +@pytest.mark.asyncio +async def test_get_conference_record_async( + transport: str = "grpc_asyncio", request_type=service.GetConferenceRecordRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
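test_get_conference_record above mocks the transport, but the call shape it verifies is the ordinary one sketched below; the conference-record name is a placeholder and a real call needs Meet API credentials.

# Hedged usage sketch of the RPC mocked in the tests above.
from google.apps import meet_v2beta

client = meet_v2beta.ConferenceRecordsServiceClient()
record = client.get_conference_record(
    name="conferenceRecords/abc-123"  # placeholder resource name
)
# The tests assert these two response fields round-trip.
print(record.name, record.space)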
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.ConferenceRecord( + name="name_value", + space="space_value", + ) + ) + response = await client.get_conference_record(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetConferenceRecordRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.ConferenceRecord) + assert response.name == "name_value" + assert response.space == "space_value" + + +@pytest.mark.asyncio +async def test_get_conference_record_async_from_dict(): + await test_get_conference_record_async(request_type=dict) + + +def test_get_conference_record_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetConferenceRecordRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + call.return_value = resource.ConferenceRecord() + client.get_conference_record(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_conference_record_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetConferenceRecordRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.ConferenceRecord() + ) + await client.get_conference_record(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_conference_record_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.ConferenceRecord() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_conference_record( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_conference_record_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_conference_record( + service.GetConferenceRecordRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_conference_record_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_conference_record), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.ConferenceRecord() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.ConferenceRecord() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_conference_record( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_conference_record_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_conference_record( + service.GetConferenceRecordRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListConferenceRecordsRequest, + dict, + ], +) +def test_list_conference_records(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListConferenceRecordsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_conference_records(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListConferenceRecordsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListConferenceRecordsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_conference_records_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), "__call__" + ) as call: + client.list_conference_records() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListConferenceRecordsRequest() + + +@pytest.mark.asyncio +async def test_list_conference_records_async( + transport: str = "grpc_asyncio", request_type=service.ListConferenceRecordsRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListConferenceRecordsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_conference_records(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListConferenceRecordsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListConferenceRecordsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_conference_records_async_from_dict(): + await test_list_conference_records_async(request_type=dict) + + +def test_list_conference_records_pager(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + next_page_token="abc", + ), + service.ListConferenceRecordsResponse( + conference_records=[], + next_page_token="def", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + ], + next_page_token="ghi", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_conference_records(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.ConferenceRecord) for i in results) + + +def test_list_conference_records_pages(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), "__call__" + ) as call: + # Set the response to a series of pages. 
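The pager tests above and the pages/async variants that follow all exercise the same surface: list_conference_records returns a pager that walks next_page_token transparently and exposes .pages with raw_page for page-level access. A hedged sketch of that surface against the real service (placeholder request, real credentials required):

# Hedged sketch of the pagination surface covered by the pager/pages tests.
from google.apps import meet_v2beta

client = meet_v2beta.ConferenceRecordsServiceClient()

# Item-level iteration: the pager fetches following pages on demand.
for record in client.list_conference_records(request={"page_size": 10}):
    print(record.name)

# Page-level iteration, mirroring the raw_page.next_page_token assertions.
for page in client.list_conference_records(request={}).pages:
    print(page.raw_page.next_page_token)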
+ call.side_effect = ( + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + next_page_token="abc", + ), + service.ListConferenceRecordsResponse( + conference_records=[], + next_page_token="def", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + ], + next_page_token="ghi", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + ), + RuntimeError, + ) + pages = list(client.list_conference_records(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_conference_records_async_pager(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + next_page_token="abc", + ), + service.ListConferenceRecordsResponse( + conference_records=[], + next_page_token="def", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + ], + next_page_token="ghi", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_conference_records( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resource.ConferenceRecord) for i in responses) + + +@pytest.mark.asyncio +async def test_list_conference_records_async_pages(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_conference_records), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + next_page_token="abc", + ), + service.ListConferenceRecordsResponse( + conference_records=[], + next_page_token="def", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + ], + next_page_token="ghi", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_conference_records(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetParticipantRequest, + dict, + ], +) +def test_get_participant(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Participant( + name="name_value", + ) + response = client.get_participant(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetParticipantRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Participant) + assert response.name == "name_value" + + +def test_get_participant_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + client.get_participant() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetParticipantRequest() + + +@pytest.mark.asyncio +async def test_get_participant_async( + transport: str = "grpc_asyncio", request_type=service.GetParticipantRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Participant( + name="name_value", + ) + ) + response = await client.get_participant(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetParticipantRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Participant) + assert response.name == "name_value" + + +@pytest.mark.asyncio +async def test_get_participant_async_from_dict(): + await test_get_participant_async(request_type=dict) + + +def test_get_participant_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetParticipantRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + call.return_value = resource.Participant() + client.get_participant(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_participant_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetParticipantRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Participant() + ) + await client.get_participant(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_participant_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Participant() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_participant( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_participant_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_participant( + service.GetParticipantRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_participant_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_participant), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Participant() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Participant() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_participant( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_participant_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_participant( + service.GetParticipantRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListParticipantsRequest, + dict, + ], +) +def test_list_participants(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListParticipantsResponse( + next_page_token="next_page_token_value", + total_size=1086, + ) + response = client.list_participants(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListParticipantsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListParticipantsPager) + assert response.next_page_token == "next_page_token_value" + assert response.total_size == 1086 + + +def test_list_participants_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + client.list_participants() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListParticipantsRequest() + + +@pytest.mark.asyncio +async def test_list_participants_async( + transport: str = "grpc_asyncio", request_type=service.ListParticipantsRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListParticipantsResponse( + next_page_token="next_page_token_value", + total_size=1086, + ) + ) + response = await client.list_participants(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListParticipantsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListParticipantsAsyncPager) + assert response.next_page_token == "next_page_token_value" + assert response.total_size == 1086 + + +@pytest.mark.asyncio +async def test_list_participants_async_from_dict(): + await test_list_participants_async(request_type=dict) + + +def test_list_participants_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListParticipantsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + call.return_value = service.ListParticipantsResponse() + client.list_participants(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_participants_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListParticipantsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListParticipantsResponse() + ) + await client.list_participants(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_participants_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListParticipantsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_participants( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_participants_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_participants( + service.ListParticipantsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_participants_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListParticipantsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListParticipantsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_participants( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_participants_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_participants( + service.ListParticipantsRequest(), + parent="parent_value", + ) + + +def test_list_participants_pager(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + resource.Participant(), + ], + next_page_token="abc", + ), + service.ListParticipantsResponse( + participants=[], + next_page_token="def", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + ], + next_page_token="ghi", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_participants(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.Participant) for i in results) + + +def test_list_participants_pages(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + resource.Participant(), + ], + next_page_token="abc", + ), + service.ListParticipantsResponse( + participants=[], + next_page_token="def", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + ], + next_page_token="ghi", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + ], + ), + RuntimeError, + ) + pages = list(client.list_participants(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_participants_async_pager(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participants), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + resource.Participant(), + ], + next_page_token="abc", + ), + service.ListParticipantsResponse( + participants=[], + next_page_token="def", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + ], + next_page_token="ghi", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_participants( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resource.Participant) for i in responses) + + +@pytest.mark.asyncio +async def test_list_participants_async_pages(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_participants), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + resource.Participant(), + ], + next_page_token="abc", + ), + service.ListParticipantsResponse( + participants=[], + next_page_token="def", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + ], + next_page_token="ghi", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_participants(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetParticipantSessionRequest, + dict, + ], +) +def test_get_participant_session(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.ParticipantSession( + name="name_value", + ) + response = client.get_participant_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetParticipantSessionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.ParticipantSession) + assert response.name == "name_value" + + +def test_get_participant_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + client.get_participant_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetParticipantSessionRequest() + + +@pytest.mark.asyncio +async def test_get_participant_session_async( + transport: str = "grpc_asyncio", request_type=service.GetParticipantSessionRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.ParticipantSession( + name="name_value", + ) + ) + response = await client.get_participant_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetParticipantSessionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.ParticipantSession) + assert response.name == "name_value" + + +@pytest.mark.asyncio +async def test_get_participant_session_async_from_dict(): + await test_get_participant_session_async(request_type=dict) + + +def test_get_participant_session_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetParticipantSessionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + call.return_value = resource.ParticipantSession() + client.get_participant_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_participant_session_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetParticipantSessionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.ParticipantSession() + ) + await client.get_participant_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_participant_session_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.ParticipantSession() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_participant_session( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_participant_session_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_participant_session( + service.GetParticipantSessionRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_participant_session_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_participant_session), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.ParticipantSession() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.ParticipantSession() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_participant_session( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_participant_session_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_participant_session( + service.GetParticipantSessionRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListParticipantSessionsRequest, + dict, + ], +) +def test_list_participant_sessions(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListParticipantSessionsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_participant_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListParticipantSessionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListParticipantSessionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_participant_sessions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + client.list_participant_sessions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListParticipantSessionsRequest() + + +@pytest.mark.asyncio +async def test_list_participant_sessions_async( + transport: str = "grpc_asyncio", request_type=service.ListParticipantSessionsRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListParticipantSessionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_participant_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListParticipantSessionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListParticipantSessionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_participant_sessions_async_from_dict(): + await test_list_participant_sessions_async(request_type=dict) + + +def test_list_participant_sessions_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListParticipantSessionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + call.return_value = service.ListParticipantSessionsResponse() + client.list_participant_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_participant_sessions_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListParticipantSessionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListParticipantSessionsResponse() + ) + await client.list_participant_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_participant_sessions_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListParticipantSessionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_participant_sessions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_participant_sessions_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_participant_sessions( + service.ListParticipantSessionsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_participant_sessions_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListParticipantSessionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListParticipantSessionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_participant_sessions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_participant_sessions_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_participant_sessions( + service.ListParticipantSessionsRequest(), + parent="parent_value", + ) + + +def test_list_participant_sessions_pager(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + next_page_token="abc", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[], + next_page_token="def", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + ], + next_page_token="ghi", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_participant_sessions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.ParticipantSession) for i in results) + + +def test_list_participant_sessions_pages(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + next_page_token="abc", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[], + next_page_token="def", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + ], + next_page_token="ghi", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + ), + RuntimeError, + ) + pages = list(client.list_participant_sessions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_participant_sessions_async_pager(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + next_page_token="abc", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[], + next_page_token="def", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + ], + next_page_token="ghi", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_participant_sessions( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resource.ParticipantSession) for i in responses) + + +@pytest.mark.asyncio +async def test_list_participant_sessions_async_pages(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_participant_sessions), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + next_page_token="abc", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[], + next_page_token="def", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + ], + next_page_token="ghi", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_participant_sessions(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetRecordingRequest, + dict, + ], +) +def test_get_recording(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Recording( + name="name_value", + state=resource.Recording.State.STARTED, + ) + response = client.get_recording(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetRecordingRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Recording) + assert response.name == "name_value" + assert response.state == resource.Recording.State.STARTED + + +def test_get_recording_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + client.get_recording() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetRecordingRequest() + + +@pytest.mark.asyncio +async def test_get_recording_async( + transport: str = "grpc_asyncio", request_type=service.GetRecordingRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Recording( + name="name_value", + state=resource.Recording.State.STARTED, + ) + ) + response = await client.get_recording(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetRecordingRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Recording) + assert response.name == "name_value" + assert response.state == resource.Recording.State.STARTED + + +@pytest.mark.asyncio +async def test_get_recording_async_from_dict(): + await test_get_recording_async(request_type=dict) + + +def test_get_recording_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetRecordingRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + call.return_value = resource.Recording() + client.get_recording(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_recording_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetRecordingRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Recording()) + await client.get_recording(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_recording_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Recording() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_recording( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_recording_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_recording( + service.GetRecordingRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_recording_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_recording), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Recording() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Recording()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_recording( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_recording_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_recording( + service.GetRecordingRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListRecordingsRequest, + dict, + ], +) +def test_list_recordings(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListRecordingsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_recordings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListRecordingsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRecordingsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_recordings_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + client.list_recordings() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListRecordingsRequest() + + +@pytest.mark.asyncio +async def test_list_recordings_async( + transport: str = "grpc_asyncio", request_type=service.ListRecordingsRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListRecordingsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_recordings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListRecordingsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRecordingsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_recordings_async_from_dict(): + await test_list_recordings_async(request_type=dict) + + +def test_list_recordings_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListRecordingsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + call.return_value = service.ListRecordingsResponse() + client.list_recordings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_recordings_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListRecordingsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListRecordingsResponse() + ) + await client.list_recordings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_recordings_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListRecordingsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_recordings( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_recordings_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_recordings( + service.ListRecordingsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_recordings_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListRecordingsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListRecordingsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_recordings( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_recordings_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_recordings( + service.ListRecordingsRequest(), + parent="parent_value", + ) + + +def test_list_recordings_pager(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + resource.Recording(), + ], + next_page_token="abc", + ), + service.ListRecordingsResponse( + recordings=[], + next_page_token="def", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + ], + next_page_token="ghi", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_recordings(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.Recording) for i in results) + + +def test_list_recordings_pages(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_recordings), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + resource.Recording(), + ], + next_page_token="abc", + ), + service.ListRecordingsResponse( + recordings=[], + next_page_token="def", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + ], + next_page_token="ghi", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + ], + ), + RuntimeError, + ) + pages = list(client.list_recordings(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_recordings_async_pager(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_recordings), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + resource.Recording(), + ], + next_page_token="abc", + ), + service.ListRecordingsResponse( + recordings=[], + next_page_token="def", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + ], + next_page_token="ghi", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_recordings( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resource.Recording) for i in responses) + + +@pytest.mark.asyncio +async def test_list_recordings_async_pages(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_recordings), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + resource.Recording(), + ], + next_page_token="abc", + ), + service.ListRecordingsResponse( + recordings=[], + next_page_token="def", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + ], + next_page_token="ghi", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_recordings(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetTranscriptRequest, + dict, + ], +) +def test_get_transcript(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Transcript( + name="name_value", + state=resource.Transcript.State.STARTED, + ) + response = client.get_transcript(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTranscriptRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Transcript) + assert response.name == "name_value" + assert response.state == resource.Transcript.State.STARTED + + +def test_get_transcript_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + client.get_transcript() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTranscriptRequest() + + +@pytest.mark.asyncio +async def test_get_transcript_async( + transport: str = "grpc_asyncio", request_type=service.GetTranscriptRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Transcript( + name="name_value", + state=resource.Transcript.State.STARTED, + ) + ) + response = await client.get_transcript(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTranscriptRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Transcript) + assert response.name == "name_value" + assert response.state == resource.Transcript.State.STARTED + + +@pytest.mark.asyncio +async def test_get_transcript_async_from_dict(): + await test_get_transcript_async(request_type=dict) + + +def test_get_transcript_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTranscriptRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + call.return_value = resource.Transcript() + client.get_transcript(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_transcript_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTranscriptRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Transcript()) + await client.get_transcript(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_transcript_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Transcript() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_transcript( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_transcript_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_transcript( + service.GetTranscriptRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_transcript_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_transcript), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Transcript() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Transcript()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_transcript( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_transcript_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_transcript( + service.GetTranscriptRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListTranscriptsRequest, + dict, + ], +) +def test_list_transcripts(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = service.ListTranscriptsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_transcripts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTranscriptsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTranscriptsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_transcripts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + client.list_transcripts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTranscriptsRequest() + + +@pytest.mark.asyncio +async def test_list_transcripts_async( + transport: str = "grpc_asyncio", request_type=service.ListTranscriptsRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTranscriptsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_transcripts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTranscriptsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTranscriptsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_transcripts_async_from_dict(): + await test_list_transcripts_async(request_type=dict) + + +def test_list_transcripts_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTranscriptsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + call.return_value = service.ListTranscriptsResponse() + client.list_transcripts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_transcripts_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTranscriptsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTranscriptsResponse() + ) + await client.list_transcripts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_transcripts_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTranscriptsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_transcripts( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_transcripts_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_transcripts( + service.ListTranscriptsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_transcripts_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTranscriptsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTranscriptsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_transcripts( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_transcripts_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_transcripts( + service.ListTranscriptsRequest(), + parent="parent_value", + ) + + +def test_list_transcripts_pager(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + resource.Transcript(), + ], + next_page_token="abc", + ), + service.ListTranscriptsResponse( + transcripts=[], + next_page_token="def", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + ], + next_page_token="ghi", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_transcripts(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.Transcript) for i in results) + + +def test_list_transcripts_pages(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_transcripts), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + resource.Transcript(), + ], + next_page_token="abc", + ), + service.ListTranscriptsResponse( + transcripts=[], + next_page_token="def", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + ], + next_page_token="ghi", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + ], + ), + RuntimeError, + ) + pages = list(client.list_transcripts(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_transcripts_async_pager(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcripts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + resource.Transcript(), + ], + next_page_token="abc", + ), + service.ListTranscriptsResponse( + transcripts=[], + next_page_token="def", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + ], + next_page_token="ghi", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_transcripts( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resource.Transcript) for i in responses) + + +@pytest.mark.asyncio +async def test_list_transcripts_async_pages(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcripts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + resource.Transcript(), + ], + next_page_token="abc", + ), + service.ListTranscriptsResponse( + transcripts=[], + next_page_token="def", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + ], + next_page_token="ghi", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_transcripts(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetTranscriptEntryRequest, + dict, + ], +) +def test_get_transcript_entry(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.TranscriptEntry( + name="name_value", + participant="participant_value", + text="text_value", + language_code="language_code_value", + ) + response = client.get_transcript_entry(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTranscriptEntryRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.TranscriptEntry) + assert response.name == "name_value" + assert response.participant == "participant_value" + assert response.text == "text_value" + assert response.language_code == "language_code_value" + + +def test_get_transcript_entry_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + client.get_transcript_entry() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTranscriptEntryRequest() + + +@pytest.mark.asyncio +async def test_get_transcript_entry_async( + transport: str = "grpc_asyncio", request_type=service.GetTranscriptEntryRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.TranscriptEntry( + name="name_value", + participant="participant_value", + text="text_value", + language_code="language_code_value", + ) + ) + response = await client.get_transcript_entry(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTranscriptEntryRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.TranscriptEntry) + assert response.name == "name_value" + assert response.participant == "participant_value" + assert response.text == "text_value" + assert response.language_code == "language_code_value" + + +@pytest.mark.asyncio +async def test_get_transcript_entry_async_from_dict(): + await test_get_transcript_entry_async(request_type=dict) + + +def test_get_transcript_entry_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTranscriptEntryRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + call.return_value = resource.TranscriptEntry() + client.get_transcript_entry(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_transcript_entry_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTranscriptEntryRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.TranscriptEntry() + ) + await client.get_transcript_entry(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_transcript_entry_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.TranscriptEntry() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_transcript_entry( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_transcript_entry_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_transcript_entry( + service.GetTranscriptEntryRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_transcript_entry_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_transcript_entry), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = resource.TranscriptEntry() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.TranscriptEntry() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_transcript_entry( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_transcript_entry_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_transcript_entry( + service.GetTranscriptEntryRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListTranscriptEntriesRequest, + dict, + ], +) +def test_list_transcript_entries(request_type, transport: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTranscriptEntriesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_transcript_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTranscriptEntriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTranscriptEntriesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_transcript_entries_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + client.list_transcript_entries() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTranscriptEntriesRequest() + + +@pytest.mark.asyncio +async def test_list_transcript_entries_async( + transport: str = "grpc_asyncio", request_type=service.ListTranscriptEntriesRequest +): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTranscriptEntriesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_transcript_entries(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTranscriptEntriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTranscriptEntriesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_transcript_entries_async_from_dict(): + await test_list_transcript_entries_async(request_type=dict) + + +def test_list_transcript_entries_field_headers(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTranscriptEntriesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + call.return_value = service.ListTranscriptEntriesResponse() + client.list_transcript_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_transcript_entries_field_headers_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTranscriptEntriesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTranscriptEntriesResponse() + ) + await client.list_transcript_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_transcript_entries_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTranscriptEntriesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_transcript_entries( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_transcript_entries_flattened_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_transcript_entries( + service.ListTranscriptEntriesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_transcript_entries_flattened_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTranscriptEntriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTranscriptEntriesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_transcript_entries( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_transcript_entries_flattened_error_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_transcript_entries( + service.ListTranscriptEntriesRequest(), + parent="parent_value", + ) + + +def test_list_transcript_entries_pager(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + # Set the response to a series of pages. 
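+ # Hedged note (it mirrors code later in this test rather than adding new
+ # API): besides flattening pages, the sync pager is expected to carry the
+ # x-goog-request-params routing header. For an empty parent,
+ # gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)) yields roughly
+ # ("x-goog-request-params", "parent="), which is what the
+ # `pager._metadata == metadata` assertion below compares against.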
+ call.side_effect = ( + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + next_page_token="abc", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[], + next_page_token="def", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + ], + next_page_token="ghi", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_transcript_entries(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.TranscriptEntry) for i in results) + + +def test_list_transcript_entries_pages(transport_name: str = "grpc"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + next_page_token="abc", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[], + next_page_token="def", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + ], + next_page_token="ghi", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + ), + RuntimeError, + ) + pages = list(client.list_transcript_entries(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_transcript_entries_async_pager(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + next_page_token="abc", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[], + next_page_token="def", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + ], + next_page_token="ghi", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_transcript_entries( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resource.TranscriptEntry) for i in responses) + + +@pytest.mark.asyncio +async def test_list_transcript_entries_async_pages(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_transcript_entries), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + next_page_token="abc", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[], + next_page_token="def", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + ], + next_page_token="ghi", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_transcript_entries(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetConferenceRecordRequest, + dict, + ], +) +def test_get_conference_record_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
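+ # Sketch of the REST mocking pattern that follows (hedged; it restates the
+ # code below rather than introducing anything new): the expected proto is
+ # serialized with json_format.MessageToJson(), wrapped in a requests.Response
+ # with status_code 200, and returned from the mocked session, so the client
+ # parses it back into a resource.ConferenceRecord.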
+ return_value = resource.ConferenceRecord( + name="name_value", + space="space_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.ConferenceRecord.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_conference_record(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.ConferenceRecord) + assert response.name == "name_value" + assert response.space == "space_value" + + +def test_get_conference_record_rest_required_fields( + request_type=service.GetConferenceRecordRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_conference_record._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_conference_record._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.ConferenceRecord() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
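+ # Rough shape of the faked transcode() result built below (nothing beyond
+ # what the code shows is assumed): a dict with "uri", "method", and
+ # "query_params" keys, where "query_params" carries the whole pb_request
+ # because the placeholder URI binds no fields.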
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.ConferenceRecord.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_conference_record(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_conference_record_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_conference_record._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_conference_record_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_get_conference_record" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_get_conference_record" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetConferenceRecordRequest.pb( + service.GetConferenceRecordRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.ConferenceRecord.to_json( + resource.ConferenceRecord() + ) + + request = service.GetConferenceRecordRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.ConferenceRecord() + + client.get_conference_record( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_conference_record_rest_bad_request( + transport: str = "rest", request_type=service.GetConferenceRecordRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
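+ # Hedged note: the mocked HTTP 400 below is expected to be translated by the
+ # google.api_core error mapping into core_exceptions.BadRequest, which is why
+ # the call is wrapped in pytest.raises(core_exceptions.BadRequest).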
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_conference_record(request) + + +def test_get_conference_record_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.ConferenceRecord() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "conferenceRecords/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.ConferenceRecord.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_conference_record(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=conferenceRecords/*}" % client.transport._host, args[1] + ) + + +def test_get_conference_record_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_conference_record( + service.GetConferenceRecordRequest(), + name="name_value", + ) + + +def test_get_conference_record_rest_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListConferenceRecordsRequest, + dict, + ], +) +def test_list_conference_records_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListConferenceRecordsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListConferenceRecordsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_conference_records(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListConferenceRecordsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_conference_records_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "post_list_conference_records", + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "pre_list_conference_records", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListConferenceRecordsRequest.pb( + service.ListConferenceRecordsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListConferenceRecordsResponse.to_json( + service.ListConferenceRecordsResponse() + ) + + request = service.ListConferenceRecordsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListConferenceRecordsResponse() + + client.list_conference_records( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_conference_records_rest_bad_request( + transport: str = "rest", request_type=service.ListConferenceRecordsRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_conference_records(request) + + +def test_list_conference_records_rest_pager(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + next_page_token="abc", + ), + service.ListConferenceRecordsResponse( + conference_records=[], + next_page_token="def", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + ], + next_page_token="ghi", + ), + service.ListConferenceRecordsResponse( + conference_records=[ + resource.ConferenceRecord(), + resource.ConferenceRecord(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + service.ListConferenceRecordsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list_conference_records(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.ConferenceRecord) for i in results) + + pages = list(client.list_conference_records(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetParticipantRequest, + dict, + ], +) +def test_get_participant_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1/participants/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Participant( + name="name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Participant.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_participant(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Participant) + assert response.name == "name_value" + + +def test_get_participant_rest_required_fields( + request_type=service.GetParticipantRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_participant._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_participant._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.Participant() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.Participant.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_participant(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_participant_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_participant._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_participant_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_get_participant" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_get_participant" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetParticipantRequest.pb(service.GetParticipantRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.Participant.to_json(resource.Participant()) + + request = service.GetParticipantRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.Participant() + + client.get_participant( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_participant_rest_bad_request( + transport: str = "rest", request_type=service.GetParticipantRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1/participants/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
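+    # Note: google.api_core maps an HTTP 400 response to
+    # core_exceptions.BadRequest, which is the exception expected below.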
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_participant(request) + + +def test_get_participant_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Participant() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "conferenceRecords/sample1/participants/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Participant.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_participant(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=conferenceRecords/*/participants/*}" + % client.transport._host, + args[1], + ) + + +def test_get_participant_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_participant( + service.GetParticipantRequest(), + name="name_value", + ) + + +def test_get_participant_rest_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListParticipantsRequest, + dict, + ], +) +def test_list_participants_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListParticipantsResponse( + next_page_token="next_page_token_value", + total_size=1086, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListParticipantsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_participants(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListParticipantsPager) + assert response.next_page_token == "next_page_token_value" + assert response.total_size == 1086 + + +def test_list_participants_rest_required_fields( + request_type=service.ListParticipantsRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_participants._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_participants._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListParticipantsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListParticipantsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_participants(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_participants_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_participants._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_participants_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_list_participants" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_list_participants" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListParticipantsRequest.pb( + service.ListParticipantsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListParticipantsResponse.to_json( + service.ListParticipantsResponse() + ) + + request = service.ListParticipantsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListParticipantsResponse() + + client.list_participants( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_participants_rest_bad_request( + transport: str = "rest", request_type=service.ListParticipantsRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_participants(request) + + +def test_list_participants_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListParticipantsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "conferenceRecords/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListParticipantsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_participants(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{parent=conferenceRecords/*}/participants" + % client.transport._host, + args[1], + ) + + +def test_list_participants_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_participants( + service.ListParticipantsRequest(), + parent="parent_value", + ) + + +def test_list_participants_rest_pager(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + resource.Participant(), + ], + next_page_token="abc", + ), + service.ListParticipantsResponse( + participants=[], + next_page_token="def", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + ], + next_page_token="ghi", + ), + service.ListParticipantsResponse( + participants=[ + resource.Participant(), + resource.Participant(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListParticipantsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "conferenceRecords/sample1"} + + pager = client.list_participants(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.Participant) for i in results) + + pages = list(client.list_participants(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetParticipantSessionRequest, + dict, + ], +) +def test_get_participant_session_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "conferenceRecords/sample1/participants/sample2/participantSessions/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.ParticipantSession( + name="name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.ParticipantSession.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_participant_session(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.ParticipantSession) + assert response.name == "name_value" + + +def test_get_participant_session_rest_required_fields( + request_type=service.GetParticipantSessionRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_participant_session._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_participant_session._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.ParticipantSession() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.ParticipantSession.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_participant_session(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_participant_session_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_participant_session._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_participant_session_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "post_get_participant_session", + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "pre_get_participant_session", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetParticipantSessionRequest.pb( + service.GetParticipantSessionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.ParticipantSession.to_json( + resource.ParticipantSession() + ) + + request = service.GetParticipantSessionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.ParticipantSession() + + client.get_participant_session( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_participant_session_rest_bad_request( + transport: str = "rest", request_type=service.GetParticipantSessionRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "conferenceRecords/sample1/participants/sample2/participantSessions/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_participant_session(request) + + +def test_get_participant_session_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.ParticipantSession() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "conferenceRecords/sample1/participants/sample2/participantSessions/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.ParticipantSession.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_participant_session(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=conferenceRecords/*/participants/*/participantSessions/*}" + % client.transport._host, + args[1], + ) + + +def test_get_participant_session_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_participant_session( + service.GetParticipantSessionRequest(), + name="name_value", + ) + + +def test_get_participant_session_rest_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListParticipantSessionsRequest, + dict, + ], +) +def test_list_participant_sessions_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1/participants/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = service.ListParticipantSessionsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListParticipantSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_participant_sessions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListParticipantSessionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_participant_sessions_rest_required_fields( + request_type=service.ListParticipantSessionsRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_participant_sessions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_participant_sessions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListParticipantSessionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListParticipantSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_participant_sessions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_participant_sessions_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_participant_sessions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_participant_sessions_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "post_list_participant_sessions", + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "pre_list_participant_sessions", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListParticipantSessionsRequest.pb( + service.ListParticipantSessionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListParticipantSessionsResponse.to_json( + service.ListParticipantSessionsResponse() + ) + + request = service.ListParticipantSessionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListParticipantSessionsResponse() + + client.list_participant_sessions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_participant_sessions_rest_bad_request( + transport: str = "rest", request_type=service.ListParticipantSessionsRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1/participants/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_participant_sessions(request) + + +def test_list_participant_sessions_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListParticipantSessionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "conferenceRecords/sample1/participants/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListParticipantSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_participant_sessions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{parent=conferenceRecords/*/participants/*}/participantSessions" + % client.transport._host, + args[1], + ) + + +def test_list_participant_sessions_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_participant_sessions( + service.ListParticipantSessionsRequest(), + parent="parent_value", + ) + + +def test_list_participant_sessions_rest_pager(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + next_page_token="abc", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[], + next_page_token="def", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + ], + next_page_token="ghi", + ), + service.ListParticipantSessionsResponse( + participant_sessions=[ + resource.ParticipantSession(), + resource.ParticipantSession(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + service.ListParticipantSessionsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "conferenceRecords/sample1/participants/sample2"} + + pager = client.list_participant_sessions(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.ParticipantSession) for i in results) + + pages = list(client.list_participant_sessions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetRecordingRequest, + dict, + ], +) +def test_get_recording_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1/recordings/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Recording( + name="name_value", + state=resource.Recording.State.STARTED, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Recording.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_recording(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Recording) + assert response.name == "name_value" + assert response.state == resource.Recording.State.STARTED + + +def test_get_recording_rest_required_fields(request_type=service.GetRecordingRequest): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_recording._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_recording._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.Recording() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.Recording.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_recording(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_recording_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_recording._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_recording_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_get_recording" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_get_recording" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetRecordingRequest.pb(service.GetRecordingRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.Recording.to_json(resource.Recording()) + + request = service.GetRecordingRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.Recording() + + client.get_recording( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_recording_rest_bad_request( + transport: str = "rest", request_type=service.GetRecordingRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1/recordings/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_recording(request) + + +def test_get_recording_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Recording() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "conferenceRecords/sample1/recordings/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Recording.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_recording(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=conferenceRecords/*/recordings/*}" + % client.transport._host, + args[1], + ) + + +def test_get_recording_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_recording( + service.GetRecordingRequest(), + name="name_value", + ) + + +def test_get_recording_rest_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListRecordingsRequest, + dict, + ], +) +def test_list_recordings_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListRecordingsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListRecordingsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_recordings(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListRecordingsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_recordings_rest_required_fields( + request_type=service.ListRecordingsRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_recordings._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_recordings._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListRecordingsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListRecordingsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_recordings(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_recordings_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_recordings._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_recordings_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_list_recordings" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_list_recordings" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListRecordingsRequest.pb(service.ListRecordingsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListRecordingsResponse.to_json( + service.ListRecordingsResponse() + ) + + request = service.ListRecordingsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListRecordingsResponse() + + client.list_recordings( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_recordings_rest_bad_request( + transport: str = "rest", request_type=service.ListRecordingsRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_recordings(request) + + +def test_list_recordings_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListRecordingsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "conferenceRecords/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListRecordingsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_recordings(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{parent=conferenceRecords/*}/recordings" + % client.transport._host, + args[1], + ) + + +def test_list_recordings_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_recordings( + service.ListRecordingsRequest(), + parent="parent_value", + ) + + +def test_list_recordings_rest_pager(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + resource.Recording(), + ], + next_page_token="abc", + ), + service.ListRecordingsResponse( + recordings=[], + next_page_token="def", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + ], + next_page_token="ghi", + ), + service.ListRecordingsResponse( + recordings=[ + resource.Recording(), + resource.Recording(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListRecordingsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "conferenceRecords/sample1"} + + pager = client.list_recordings(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.Recording) for i in results) + + pages = list(client.list_recordings(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetTranscriptRequest, + dict, + ], +) +def test_get_transcript_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1/transcripts/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Transcript( + name="name_value", + state=resource.Transcript.State.STARTED, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Transcript.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_transcript(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Transcript) + assert response.name == "name_value" + assert response.state == resource.Transcript.State.STARTED + + +def test_get_transcript_rest_required_fields(request_type=service.GetTranscriptRequest): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_transcript._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_transcript._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.Transcript() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
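+ # For reference, path_template.transcode normally matches the request against
+ # the method's http_options and returns a dict with "method", "uri",
+ # "query_params" and, for bindings with a body, "body"; the canned
+ # transcode_result below stands in for that so no real field values are needed.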
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.Transcript.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_transcript(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_transcript_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_transcript._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_transcript_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_get_transcript" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_get_transcript" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetTranscriptRequest.pb(service.GetTranscriptRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.Transcript.to_json(resource.Transcript()) + + request = service.GetTranscriptRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.Transcript() + + client.get_transcript( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_transcript_rest_bad_request( + transport: str = "rest", request_type=service.GetTranscriptRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "conferenceRecords/sample1/transcripts/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_transcript(request) + + +def test_get_transcript_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Transcript() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "conferenceRecords/sample1/transcripts/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Transcript.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_transcript(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=conferenceRecords/*/transcripts/*}" + % client.transport._host, + args[1], + ) + + +def test_get_transcript_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_transcript( + service.GetTranscriptRequest(), + name="name_value", + ) + + +def test_get_transcript_rest_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListTranscriptsRequest, + dict, + ], +) +def test_list_transcripts_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListTranscriptsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListTranscriptsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_transcripts(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTranscriptsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_transcripts_rest_required_fields( + request_type=service.ListTranscriptsRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_transcripts._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_transcripts._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListTranscriptsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListTranscriptsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_transcripts(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_transcripts_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_transcripts._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_transcripts_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_list_transcripts" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_list_transcripts" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListTranscriptsRequest.pb(service.ListTranscriptsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListTranscriptsResponse.to_json( + service.ListTranscriptsResponse() + ) + + request = service.ListTranscriptsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListTranscriptsResponse() + + client.list_transcripts( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_transcripts_rest_bad_request( + transport: str = "rest", request_type=service.ListTranscriptsRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_transcripts(request) + + +def test_list_transcripts_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListTranscriptsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "conferenceRecords/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListTranscriptsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_transcripts(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{parent=conferenceRecords/*}/transcripts" + % client.transport._host, + args[1], + ) + + +def test_list_transcripts_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_transcripts( + service.ListTranscriptsRequest(), + parent="parent_value", + ) + + +def test_list_transcripts_rest_pager(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + resource.Transcript(), + ], + next_page_token="abc", + ), + service.ListTranscriptsResponse( + transcripts=[], + next_page_token="def", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + ], + next_page_token="ghi", + ), + service.ListTranscriptsResponse( + transcripts=[ + resource.Transcript(), + resource.Transcript(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListTranscriptsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "conferenceRecords/sample1"} + + pager = client.list_transcripts(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.Transcript) for i in results) + + pages = list(client.list_transcripts(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetTranscriptEntryRequest, + dict, + ], +) +def test_get_transcript_entry_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "conferenceRecords/sample1/transcripts/sample2/entries/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.TranscriptEntry( + name="name_value", + participant="participant_value", + text="text_value", + language_code="language_code_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.TranscriptEntry.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_transcript_entry(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.TranscriptEntry) + assert response.name == "name_value" + assert response.participant == "participant_value" + assert response.text == "text_value" + assert response.language_code == "language_code_value" + + +def test_get_transcript_entry_rest_required_fields( + request_type=service.GetTranscriptEntryRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_transcript_entry._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_transcript_entry._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.TranscriptEntry() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.TranscriptEntry.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_transcript_entry(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_transcript_entry_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_transcript_entry._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_transcript_entry_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "post_get_transcript_entry" + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, "pre_get_transcript_entry" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetTranscriptEntryRequest.pb( + service.GetTranscriptEntryRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.TranscriptEntry.to_json( + resource.TranscriptEntry() + ) + + request = service.GetTranscriptEntryRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.TranscriptEntry() + + client.get_transcript_entry( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_transcript_entry_rest_bad_request( + transport: str = "rest", request_type=service.GetTranscriptEntryRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "conferenceRecords/sample1/transcripts/sample2/entries/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_transcript_entry(request) + + +def test_get_transcript_entry_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.TranscriptEntry() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "conferenceRecords/sample1/transcripts/sample2/entries/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.TranscriptEntry.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_transcript_entry(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=conferenceRecords/*/transcripts/*/entries/*}" + % client.transport._host, + args[1], + ) + + +def test_get_transcript_entry_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_transcript_entry( + service.GetTranscriptEntryRequest(), + name="name_value", + ) + + +def test_get_transcript_entry_rest_error(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.ListTranscriptEntriesRequest, + dict, + ], +) +def test_list_transcript_entries_rest(request_type): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1/transcripts/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = service.ListTranscriptEntriesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListTranscriptEntriesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_transcript_entries(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTranscriptEntriesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_transcript_entries_rest_required_fields( + request_type=service.ListTranscriptEntriesRequest, +): + transport_class = transports.ConferenceRecordsServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_transcript_entries._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_transcript_entries._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListTranscriptEntriesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListTranscriptEntriesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_transcript_entries(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_transcript_entries_rest_unset_required_fields(): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_transcript_entries._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_transcript_entries_rest_interceptors(null_interceptor): + transport = transports.ConferenceRecordsServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ConferenceRecordsServiceRestInterceptor(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "post_list_transcript_entries", + ) as post, mock.patch.object( + transports.ConferenceRecordsServiceRestInterceptor, + "pre_list_transcript_entries", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListTranscriptEntriesRequest.pb( + service.ListTranscriptEntriesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListTranscriptEntriesResponse.to_json( + service.ListTranscriptEntriesResponse() + ) + + request = service.ListTranscriptEntriesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListTranscriptEntriesResponse() + + client.list_transcript_entries( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_transcript_entries_rest_bad_request( + transport: str = "rest", request_type=service.ListTranscriptEntriesRequest +): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "conferenceRecords/sample1/transcripts/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_transcript_entries(request) + + +def test_list_transcript_entries_rest_flattened(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = service.ListTranscriptEntriesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "conferenceRecords/sample1/transcripts/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListTranscriptEntriesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_transcript_entries(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{parent=conferenceRecords/*/transcripts/*}/entries" + % client.transport._host, + args[1], + ) + + +def test_list_transcript_entries_rest_flattened_error(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_transcript_entries( + service.ListTranscriptEntriesRequest(), + parent="parent_value", + ) + + +def test_list_transcript_entries_rest_pager(transport: str = "rest"): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + next_page_token="abc", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[], + next_page_token="def", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + ], + next_page_token="ghi", + ), + service.ListTranscriptEntriesResponse( + transcript_entries=[ + resource.TranscriptEntry(), + resource.TranscriptEntry(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + service.ListTranscriptEntriesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "conferenceRecords/sample1/transcripts/sample2"} + + pager = client.list_transcript_entries(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resource.TranscriptEntry) for i in results) + + pages = list(client.list_transcript_entries(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ConferenceRecordsServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ConferenceRecordsServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ConferenceRecordsServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.ConferenceRecordsServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ConferenceRecordsServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ConferenceRecordsServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ConferenceRecordsServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ConferenceRecordsServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
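+ # A transport constructed directly already carries its own credentials, so the
+ # client is given only the transport here (supplying both is the error case
+ # covered by test_credentials_transport_error above).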
+ transport = transports.ConferenceRecordsServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ConferenceRecordsServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ConferenceRecordsServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ConferenceRecordsServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ConferenceRecordsServiceGrpcTransport, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + transports.ConferenceRecordsServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = ConferenceRecordsServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ConferenceRecordsServiceGrpcTransport, + ) + + +def test_conference_records_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ConferenceRecordsServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_conference_records_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.apps.meet_v2beta.services.conference_records_service.transports.ConferenceRecordsServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ConferenceRecordsServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "get_conference_record", + "list_conference_records", + "get_participant", + "list_participants", + "get_participant_session", + "list_participant_sessions", + "get_recording", + "list_recordings", + "get_transcript", + "list_transcripts", + "get_transcript_entry", + "list_transcript_entries", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_conference_records_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.apps.meet_v2beta.services.conference_records_service.transports.ConferenceRecordsServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ConferenceRecordsServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_conference_records_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.apps.meet_v2beta.services.conference_records_service.transports.ConferenceRecordsServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ConferenceRecordsServiceTransport() + adc.assert_called_once() + + +def test_conference_records_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ConferenceRecordsServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ConferenceRecordsServiceGrpcTransport, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + ], +) +def test_conference_records_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
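+ # "ADC" here is Application Default Credentials, resolved through
+ # google.auth.default(); patching it below means the test never needs real
+ # credentials to be present on the machine.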
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ConferenceRecordsServiceGrpcTransport, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + transports.ConferenceRecordsServiceRestTransport, + ], +) +def test_conference_records_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ConferenceRecordsServiceGrpcTransport, grpc_helpers), + (transports.ConferenceRecordsServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_conference_records_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "meet.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="meet.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ConferenceRecordsServiceGrpcTransport, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + ], +) +def test_conference_records_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
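+ # client_cert_source_callback is the test helper used throughout this module
+ # (defined outside this hunk); it returns a certificate/key pair, and the
+ # assertion below checks that exactly that pair is forwarded to
+ # grpc.ssl_channel_credentials.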
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_conference_records_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.ConferenceRecordsServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_conference_records_service_host_no_port(transport_name):
+ client = ConferenceRecordsServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint="meet.googleapis.com"),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "meet.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://meet.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_conference_records_service_host_with_port(transport_name):
+ client = ConferenceRecordsServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="meet.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "meet.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://meet.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_conference_records_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = ConferenceRecordsServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = ConferenceRecordsServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.get_conference_record._session
+ session2 = client2.transport.get_conference_record._session
+ assert session1 != session2
+ session1 = client1.transport.list_conference_records._session
+ session2 = client2.transport.list_conference_records._session
+ assert session1 != session2
+ session1 = client1.transport.get_participant._session
+ session2 = client2.transport.get_participant._session
+ assert session1 != session2
+ session1 = client1.transport.list_participants._session
+ session2 = client2.transport.list_participants._session
+ assert session1 != session2
+ session1 = client1.transport.get_participant_session._session
+ session2 = client2.transport.get_participant_session._session
+ assert session1 != session2
+ session1 = client1.transport.list_participant_sessions._session
+ session2 = client2.transport.list_participant_sessions._session
+ assert session1 != session2
+ session1 = client1.transport.get_recording._session
+ session2 = client2.transport.get_recording._session
+ assert session1 != session2
+ session1 = client1.transport.list_recordings._session
+ session2 = client2.transport.list_recordings._session
+ assert session1 != session2
+ session1 = client1.transport.get_transcript._session
+ session2 = client2.transport.get_transcript._session
+ assert session1 != session2
+ session1 = client1.transport.list_transcripts._session
+ session2 = client2.transport.list_transcripts._session
+ assert session1 != session2
+ session1 = client1.transport.get_transcript_entry._session
+ session2 = client2.transport.get_transcript_entry._session
+ assert session1 != session2
+ session1 = client1.transport.list_transcript_entries._session
+ session2 = client2.transport.list_transcript_entries._session
+ assert session1 != session2
+
+
+def test_conference_records_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ConferenceRecordsServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_conference_records_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ConferenceRecordsServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ConferenceRecordsServiceGrpcTransport,
+ transports.ConferenceRecordsServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_conference_records_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize( + "transport_class", + [ + transports.ConferenceRecordsServiceGrpcTransport, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + ], +) +def test_conference_records_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_conference_record_path(): + conference_record = "squid" + expected = "conferenceRecords/{conference_record}".format( + conference_record=conference_record, + ) + actual = ConferenceRecordsServiceClient.conference_record_path(conference_record) + assert expected == actual + + +def test_parse_conference_record_path(): + expected = { + "conference_record": "clam", + } + path = ConferenceRecordsServiceClient.conference_record_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_conference_record_path(path) + assert expected == actual + + +def test_participant_path(): + conference_record = "whelk" + participant = "octopus" + expected = ( + "conferenceRecords/{conference_record}/participants/{participant}".format( + conference_record=conference_record, + participant=participant, + ) + ) + actual = ConferenceRecordsServiceClient.participant_path( + conference_record, participant + ) + assert expected == actual + + +def test_parse_participant_path(): + expected = { + "conference_record": "oyster", + "participant": "nudibranch", + } + path = ConferenceRecordsServiceClient.participant_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_participant_path(path) + assert expected == actual + + +def test_participant_session_path(): + conference_record = "cuttlefish" + participant = "mussel" + participant_session = "winkle" + expected = "conferenceRecords/{conference_record}/participants/{participant}/participantSessions/{participant_session}".format( + conference_record=conference_record, + participant=participant, + participant_session=participant_session, + ) + actual = ConferenceRecordsServiceClient.participant_session_path( + conference_record, participant, participant_session + ) + assert expected == actual + + +def test_parse_participant_session_path(): + expected = { + "conference_record": "nautilus", + "participant": "scallop", + "participant_session": "abalone", + } + path = ConferenceRecordsServiceClient.participant_session_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ConferenceRecordsServiceClient.parse_participant_session_path(path) + assert expected == actual + + +def test_recording_path(): + conference_record = "squid" + recording = "clam" + expected = "conferenceRecords/{conference_record}/recordings/{recording}".format( + conference_record=conference_record, + recording=recording, + ) + actual = ConferenceRecordsServiceClient.recording_path(conference_record, recording) + assert expected == actual + + +def test_parse_recording_path(): + expected = { + "conference_record": "whelk", + "recording": "octopus", + } + path = ConferenceRecordsServiceClient.recording_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_recording_path(path) + assert expected == actual + + +def test_space_path(): + space = "oyster" + expected = "spaces/{space}".format( + space=space, + ) + actual = ConferenceRecordsServiceClient.space_path(space) + assert expected == actual + + +def test_parse_space_path(): + expected = { + "space": "nudibranch", + } + path = ConferenceRecordsServiceClient.space_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_space_path(path) + assert expected == actual + + +def test_transcript_path(): + conference_record = "cuttlefish" + transcript = "mussel" + expected = "conferenceRecords/{conference_record}/transcripts/{transcript}".format( + conference_record=conference_record, + transcript=transcript, + ) + actual = ConferenceRecordsServiceClient.transcript_path( + conference_record, transcript + ) + assert expected == actual + + +def test_parse_transcript_path(): + expected = { + "conference_record": "winkle", + "transcript": "nautilus", + } + path = ConferenceRecordsServiceClient.transcript_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_transcript_path(path) + assert expected == actual + + +def test_transcript_entry_path(): + conference_record = "scallop" + transcript = "abalone" + entry = "squid" + expected = "conferenceRecords/{conference_record}/transcripts/{transcript}/entries/{entry}".format( + conference_record=conference_record, + transcript=transcript, + entry=entry, + ) + actual = ConferenceRecordsServiceClient.transcript_entry_path( + conference_record, transcript, entry + ) + assert expected == actual + + +def test_parse_transcript_entry_path(): + expected = { + "conference_record": "clam", + "transcript": "whelk", + "entry": "octopus", + } + path = ConferenceRecordsServiceClient.transcript_entry_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_transcript_entry_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ConferenceRecordsServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = ConferenceRecordsServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ConferenceRecordsServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = ConferenceRecordsServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = ConferenceRecordsServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = ConferenceRecordsServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = ConferenceRecordsServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format( + project=project, + ) + actual = ConferenceRecordsServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = ConferenceRecordsServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ConferenceRecordsServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = ConferenceRecordsServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = ConferenceRecordsServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ConferenceRecordsServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ConferenceRecordsServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ConferenceRecordsServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ConferenceRecordsServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ConferenceRecordsServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = ConferenceRecordsServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + ( + ConferenceRecordsServiceClient, + transports.ConferenceRecordsServiceGrpcTransport, + ), + ( + ConferenceRecordsServiceAsyncClient, + transports.ConferenceRecordsServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_spaces_service.py b/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_spaces_service.py new file mode 100644 index 000000000000..285e5f3d0e8c --- /dev/null +++ b/packages/google-apps-meet/tests/unit/gapic/meet_v2beta/test_spaces_service.py @@ -0,0 +1,3495 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import json_format +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.apps.meet_v2beta.services.spaces_service import ( + SpacesServiceAsyncClient, + SpacesServiceClient, + transports, +) +from google.apps.meet_v2beta.types import resource, service + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpacesServiceClient._get_default_mtls_endpoint(None) is None + assert ( + SpacesServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + SpacesServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + SpacesServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + SpacesServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + SpacesServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (SpacesServiceClient, "grpc"), + (SpacesServiceAsyncClient, "grpc_asyncio"), + (SpacesServiceClient, "rest"), + ], +) +def test_spaces_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "meet.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://meet.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.SpacesServiceGrpcTransport, "grpc"), + (transports.SpacesServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.SpacesServiceRestTransport, "rest"), + ], +) +def test_spaces_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (SpacesServiceClient, "grpc"), + (SpacesServiceAsyncClient, "grpc_asyncio"), + (SpacesServiceClient, "rest"), + ], +) +def test_spaces_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) 
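The constructors exercised in these tests are the public from_service_account_* classmethods on the generated client. A minimal usage sketch, assuming a service-account key exists at the placeholder path and that the transport choice is only illustrative:

from google.apps.meet_v2beta.services.spaces_service import SpacesServiceClient

# Build a client from a downloaded service-account key (placeholder path).
# With the gRPC transport the resolved host is meet.googleapis.com:443; with
# transport="rest" it is https://meet.googleapis.com, matching the asserts here.
client = SpacesServiceClient.from_service_account_file(
    "path/to/service-account-key.json", transport="grpc"
)

# Equivalent construction from an already-parsed key dict:
# client = SpacesServiceClient.from_service_account_info(info_dict)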
+ + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "meet.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://meet.googleapis.com" + ) + + +def test_spaces_service_client_get_transport_class(): + transport = SpacesServiceClient.get_transport_class() + available_transports = [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceRestTransport, + ] + assert transport in available_transports + + transport = SpacesServiceClient.get_transport_class("grpc") + assert transport == transports.SpacesServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpacesServiceClient, transports.SpacesServiceGrpcTransport, "grpc"), + ( + SpacesServiceAsyncClient, + transports.SpacesServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (SpacesServiceClient, transports.SpacesServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + SpacesServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(SpacesServiceClient), +) +@mock.patch.object( + SpacesServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(SpacesServiceAsyncClient), +) +def test_spaces_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpacesServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpacesServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (SpacesServiceClient, transports.SpacesServiceGrpcTransport, "grpc", "true"), + ( + SpacesServiceAsyncClient, + transports.SpacesServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (SpacesServiceClient, transports.SpacesServiceGrpcTransport, "grpc", "false"), + ( + SpacesServiceAsyncClient, + transports.SpacesServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + (SpacesServiceClient, transports.SpacesServiceRestTransport, "rest", "true"), + (SpacesServiceClient, transports.SpacesServiceRestTransport, "rest", "false"), + ], +) +@mock.patch.object( + SpacesServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(SpacesServiceClient), +) +@mock.patch.object( + SpacesServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(SpacesServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_spaces_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [SpacesServiceClient, SpacesServiceAsyncClient] +) +@mock.patch.object( + SpacesServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(SpacesServiceClient), +) +@mock.patch.object( + SpacesServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(SpacesServiceAsyncClient), +) +def test_spaces_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
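The endpoint autoswitch covered by these cases is driven by two environment variables, GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT (the latter defaults to "auto"). A minimal opt-in sketch, assuming Application Default Credentials are configured and that the certificate callback is a placeholder:

import os

from google.api_core import client_options
from google.apps.meet_v2beta.services.spaces_service import SpacesServiceClient


def cert_source():
    # Placeholder: return (certificate_chain, private_key) bytes for mTLS.
    return b"cert bytes", b"key bytes"


# Opt in to client certificates; with GOOGLE_API_USE_MTLS_ENDPOINT left at its
# default of "auto", the client switches to the mTLS endpoint when a cert exists.
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"

client = SpacesServiceClient(
    client_options=client_options.ClientOptions(client_cert_source=cert_source),
)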
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpacesServiceClient, transports.SpacesServiceGrpcTransport, "grpc"), + ( + SpacesServiceAsyncClient, + transports.SpacesServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (SpacesServiceClient, transports.SpacesServiceRestTransport, "rest"), + ], +) +def test_spaces_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + SpacesServiceClient, + transports.SpacesServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + SpacesServiceAsyncClient, + transports.SpacesServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (SpacesServiceClient, transports.SpacesServiceRestTransport, "rest", None), + ], +) +def test_spaces_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_spaces_service_client_client_options_from_dict(): + with mock.patch( + "google.apps.meet_v2beta.services.spaces_service.transports.SpacesServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = SpacesServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + SpacesServiceClient, + transports.SpacesServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + SpacesServiceAsyncClient, + transports.SpacesServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_spaces_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "meet.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="meet.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.CreateSpaceRequest, + dict, + ], +) +def test_create_space(request_type, transport: str = "grpc"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + response = client.create_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateSpaceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +def test_create_space_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_space), "__call__") as call: + client.create_space() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateSpaceRequest() + + +@pytest.mark.asyncio +async def test_create_space_async( + transport: str = "grpc_asyncio", request_type=service.CreateSpaceRequest +): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_space), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + ) + response = await client.create_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateSpaceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +@pytest.mark.asyncio +async def test_create_space_async_from_dict(): + await test_create_space_async(request_type=dict) + + +def test_create_space_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_space( + space=resource.Space(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].space + mock_val = resource.Space(name="name_value") + assert arg == mock_val + + +def test_create_space_flattened_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_space( + service.CreateSpaceRequest(), + space=resource.Space(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_space_flattened_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Space()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_space( + space=resource.Space(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].space + mock_val = resource.Space(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_space_flattened_error_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_space( + service.CreateSpaceRequest(), + space=resource.Space(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetSpaceRequest, + dict, + ], +) +def test_get_space(request_type, transport: str = "grpc"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + response = client.get_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetSpaceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +def test_get_space_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_space), "__call__") as call: + client.get_space() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetSpaceRequest() + + +@pytest.mark.asyncio +async def test_get_space_async( + transport: str = "grpc_asyncio", request_type=service.GetSpaceRequest +): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + ) + response = await client.get_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetSpaceRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +@pytest.mark.asyncio +async def test_get_space_async_from_dict(): + await test_get_space_async(request_type=dict) + + +def test_get_space_field_headers(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetSpaceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_space), "__call__") as call: + call.return_value = resource.Space() + client.get_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_space_field_headers_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetSpaceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_space), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Space()) + await client.get_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_space_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_space( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_space_flattened_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_space( + service.GetSpaceRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_space_flattened_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Space()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_space( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_space_flattened_error_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_space( + service.GetSpaceRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.UpdateSpaceRequest, + dict, + ], +) +def test_update_space(request_type, transport: str = "grpc"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + response = client.update_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateSpaceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +def test_update_space_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + client.update_space() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateSpaceRequest() + + +@pytest.mark.asyncio +async def test_update_space_async( + transport: str = "grpc_asyncio", request_type=service.UpdateSpaceRequest +): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + ) + response = await client.update_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateSpaceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +@pytest.mark.asyncio +async def test_update_space_async_from_dict(): + await test_update_space_async(request_type=dict) + + +def test_update_space_field_headers(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateSpaceRequest() + + request.space.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + call.return_value = resource.Space() + client.update_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "space.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_space_field_headers_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateSpaceRequest() + + request.space.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Space()) + await client.update_space(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "space.name=name_value", + ) in kw["metadata"] + + +def test_update_space_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_space( + space=resource.Space(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].space + mock_val = resource.Space(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_space_flattened_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_space( + service.UpdateSpaceRequest(), + space=resource.Space(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_space_flattened_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_space), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = resource.Space() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resource.Space()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_space( + space=resource.Space(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].space + mock_val = resource.Space(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_space_flattened_error_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_space( + service.UpdateSpaceRequest(), + space=resource.Space(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.EndActiveConferenceRequest, + dict, + ], +) +def test_end_active_conference(request_type, transport: str = "grpc"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.end_active_conference(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.EndActiveConferenceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_end_active_conference_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + client.end_active_conference() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.EndActiveConferenceRequest() + + +@pytest.mark.asyncio +async def test_end_active_conference_async( + transport: str = "grpc_asyncio", request_type=service.EndActiveConferenceRequest +): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.end_active_conference(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.EndActiveConferenceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_end_active_conference_async_from_dict(): + await test_end_active_conference_async(request_type=dict) + + +def test_end_active_conference_field_headers(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.EndActiveConferenceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + call.return_value = None + client.end_active_conference(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_end_active_conference_field_headers_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.EndActiveConferenceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.end_active_conference(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_end_active_conference_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.end_active_conference( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_end_active_conference_flattened_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.end_active_conference( + service.EndActiveConferenceRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_end_active_conference_flattened_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.end_active_conference), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.end_active_conference( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_end_active_conference_flattened_error_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.end_active_conference( + service.EndActiveConferenceRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.CreateSpaceRequest, + dict, + ], +) +def test_create_space_rest(request_type): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request_init["space"] = { + "name": "name_value", + "meeting_uri": "meeting_uri_value", + "meeting_code": "meeting_code_value", + "config": {"access_type": 1, "entry_point_access": 1}, + "active_conference": {"conference_record": "conference_record_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.CreateSpaceRequest.meta.fields["space"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["space"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["space"][field])): + del request_init["space"][field][i][subfield] + else: + del request_init["space"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_space(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_space_rest_interceptors(null_interceptor): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.SpacesServiceRestInterceptor(), + ) + client = SpacesServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.SpacesServiceRestInterceptor, "post_create_space" + ) as post, mock.patch.object( + transports.SpacesServiceRestInterceptor, "pre_create_space" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.CreateSpaceRequest.pb(service.CreateSpaceRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.Space.to_json(resource.Space()) + + request = service.CreateSpaceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.Space() + + client.create_space( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_space_rest_bad_request( + transport: str = "rest", request_type=service.CreateSpaceRequest +): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_space(request) + + +def test_create_space_rest_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = resource.Space() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + space=resource.Space(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_space(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/spaces" % client.transport._host, args[1] + ) + + +def test_create_space_rest_flattened_error(transport: str = "rest"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_space( + service.CreateSpaceRequest(), + space=resource.Space(name="name_value"), + ) + + +def test_create_space_rest_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.GetSpaceRequest, + dict, + ], +) +def test_get_space_rest(request_type): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "spaces/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_space(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +def test_get_space_rest_required_fields(request_type=service.GetSpaceRequest): + transport_class = transports.SpacesServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_space._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_space._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.Space() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
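+ # The transport is also expected to append the "$alt" system query parameter
+ # (JSON responses with integer enum encoding), which the expected_params
+ # assertion further below verifies.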
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_space(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_space_rest_unset_required_fields(): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_space._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_space_rest_interceptors(null_interceptor): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.SpacesServiceRestInterceptor(), + ) + client = SpacesServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.SpacesServiceRestInterceptor, "post_get_space" + ) as post, mock.patch.object( + transports.SpacesServiceRestInterceptor, "pre_get_space" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetSpaceRequest.pb(service.GetSpaceRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.Space.to_json(resource.Space()) + + request = service.GetSpaceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.Space() + + client.get_space( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_space_rest_bad_request( + transport: str = "rest", request_type=service.GetSpaceRequest +): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "spaces/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_space(request) + + +def test_get_space_rest_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
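+ # The flattened "name" argument should be transcoded into the request URI,
+ # which path_template.validate() checks against "/v2beta/{name=spaces/*}" below.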
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Space() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "spaces/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_space(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=spaces/*}" % client.transport._host, args[1] + ) + + +def test_get_space_rest_flattened_error(transport: str = "rest"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_space( + service.GetSpaceRequest(), + name="name_value", + ) + + +def test_get_space_rest_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.UpdateSpaceRequest, + dict, + ], +) +def test_update_space_rest(request_type): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"space": {"name": "spaces/sample1"}} + request_init["space"] = { + "name": "spaces/sample1", + "meeting_uri": "meeting_uri_value", + "meeting_code": "meeting_code_value", + "config": {"access_type": 1, "entry_point_access": 1}, + "active_conference": {"conference_record": "conference_record_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.UpdateSpaceRequest.meta.fields["space"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["space"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["space"][field])): + del request_init["space"][field][i][subfield] + else: + del request_init["space"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Space( + name="name_value", + meeting_uri="meeting_uri_value", + meeting_code="meeting_code_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_space(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resource.Space) + assert response.name == "name_value" + assert response.meeting_uri == "meeting_uri_value" + assert response.meeting_code == "meeting_code_value" + + +def test_update_space_rest_required_fields(request_type=service.UpdateSpaceRequest): + transport_class = transports.SpacesServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_space._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_space._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resource.Space() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_space(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_space_rest_unset_required_fields(): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_space._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask",)) & set(("space",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_space_rest_interceptors(null_interceptor): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.SpacesServiceRestInterceptor(), + ) + client = SpacesServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.SpacesServiceRestInterceptor, "post_update_space" + ) as post, mock.patch.object( + transports.SpacesServiceRestInterceptor, "pre_update_space" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UpdateSpaceRequest.pb(service.UpdateSpaceRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = resource.Space.to_json(resource.Space()) + + request = service.UpdateSpaceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = resource.Space() + + client.update_space( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_space_rest_bad_request( + transport: str = "rest", request_type=service.UpdateSpaceRequest +): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"space": {"name": "spaces/sample1"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_space(request) + + +def test_update_space_rest_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
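+ # UpdateSpace routes on the nested resource name, so the URI template checked
+ # at the end of this test is "/v2beta/{space.name=spaces/*}".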
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = resource.Space() + + # get arguments that satisfy an http rule for this method + sample_request = {"space": {"name": "spaces/sample1"}} + + # get truthy value for each flattened field + mock_args = dict( + space=resource.Space(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resource.Space.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_space(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{space.name=spaces/*}" % client.transport._host, args[1] + ) + + +def test_update_space_rest_flattened_error(transport: str = "rest"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_space( + service.UpdateSpaceRequest(), + space=resource.Space(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_space_rest_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + service.EndActiveConferenceRequest, + dict, + ], +) +def test_end_active_conference_rest(request_type): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "spaces/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.end_active_conference(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_end_active_conference_rest_required_fields( + request_type=service.EndActiveConferenceRequest, +): + transport_class = transports.SpacesServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).end_active_conference._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).end_active_conference._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.end_active_conference(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_end_active_conference_rest_unset_required_fields(): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.end_active_conference._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_end_active_conference_rest_interceptors(null_interceptor): + transport = transports.SpacesServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.SpacesServiceRestInterceptor(), + ) + client = SpacesServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.SpacesServiceRestInterceptor, "pre_end_active_conference" + ) as pre: + pre.assert_not_called() + pb_message = service.EndActiveConferenceRequest.pb( + service.EndActiveConferenceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = service.EndActiveConferenceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.end_active_conference( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_end_active_conference_rest_bad_request( + transport: str = "rest", request_type=service.EndActiveConferenceRequest +): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "spaces/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.end_active_conference(request) + + +def test_end_active_conference_rest_flattened(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "spaces/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.end_active_conference(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2beta/{name=spaces/*}:endActiveConference" % client.transport._host, + args[1], + ) + + +def test_end_active_conference_rest_flattened_error(transport: str = "rest"): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.end_active_conference( + service.EndActiveConferenceRequest(), + name="name_value", + ) + + +def test_end_active_conference_rest_error(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SpacesServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SpacesServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpacesServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.SpacesServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = SpacesServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = SpacesServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SpacesServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpacesServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpacesServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = SpacesServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
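+ # Both the sync and async gRPC transports should expose the channel they wrap.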
+ transport = transports.SpacesServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpacesServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceGrpcAsyncIOTransport, + transports.SpacesServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = SpacesServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.SpacesServiceGrpcTransport, + ) + + +def test_spaces_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SpacesServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_spaces_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.apps.meet_v2beta.services.spaces_service.transports.SpacesServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.SpacesServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_space", + "get_space", + "update_space", + "end_active_conference", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_spaces_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.apps.meet_v2beta.services.spaces_service.transports.SpacesServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpacesServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_spaces_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.apps.meet_v2beta.services.spaces_service.transports.SpacesServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpacesServiceTransport() + adc.assert_called_once() + + +def test_spaces_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpacesServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceGrpcAsyncIOTransport, + ], +) +def test_spaces_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceGrpcAsyncIOTransport, + transports.SpacesServiceRestTransport, + ], +) +def test_spaces_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.SpacesServiceGrpcTransport, grpc_helpers), + (transports.SpacesServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_spaces_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "meet.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="meet.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceGrpcAsyncIOTransport, + ], +) +def test_spaces_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_spaces_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.SpacesServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_spaces_service_host_no_port(transport_name): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint="meet.googleapis.com"), + transport=transport_name, + ) + assert client.transport._host == ( + "meet.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://meet.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_spaces_service_host_with_port(transport_name): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="meet.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "meet.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else 
"https://meet.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_spaces_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = SpacesServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = SpacesServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_space._session + session2 = client2.transport.create_space._session + assert session1 != session2 + session1 = client1.transport.get_space._session + session2 = client2.transport.get_space._session + assert session1 != session2 + session1 = client1.transport.update_space._session + session2 = client2.transport.update_space._session + assert session1 != session2 + session1 = client1.transport.end_active_conference._session + session2 = client2.transport.end_active_conference._session + assert session1 != session2 + + +def test_spaces_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.SpacesServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_spaces_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.SpacesServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceGrpcAsyncIOTransport, + ], +) +def test_spaces_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpacesServiceGrpcTransport, + transports.SpacesServiceGrpcAsyncIOTransport, + ], +) +def test_spaces_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_conference_record_path(): + conference_record = "squid" + expected = "conferenceRecords/{conference_record}".format( + conference_record=conference_record, + ) + actual = SpacesServiceClient.conference_record_path(conference_record) + assert expected == actual + + +def test_parse_conference_record_path(): + expected = { + "conference_record": "clam", + } + path = SpacesServiceClient.conference_record_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpacesServiceClient.parse_conference_record_path(path) + assert expected == actual + + +def test_space_path(): + space = "whelk" + expected = "spaces/{space}".format( + space=space, + ) + actual = SpacesServiceClient.space_path(space) + assert expected == actual + + +def test_parse_space_path(): + expected = { + "space": "octopus", + } + path = SpacesServiceClient.space_path(**expected) + + # Check that the path construction is reversible. + actual = SpacesServiceClient.parse_space_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = SpacesServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = SpacesServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = SpacesServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = SpacesServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = SpacesServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SpacesServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = SpacesServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = SpacesServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SpacesServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format( + project=project, + ) + actual = SpacesServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = SpacesServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = SpacesServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = SpacesServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = SpacesServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpacesServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.SpacesServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.SpacesServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = SpacesServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = SpacesServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = SpacesServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (SpacesServiceClient, transports.SpacesServiceGrpcTransport), + (SpacesServiceAsyncClient, transports.SpacesServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) From edb25827aef2b2ec67c9b299c6fe1b699f5de84e Mon Sep 17 00:00:00 2001 From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com> Date: Thu, 7 Dec 2023 13:36:40 -0800 Subject: [PATCH 04/80] chore: Update release-please config files (#12102) Update release-please config files --- .release-please-manifest.json | 1 + release-please-config.json | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index dc66a872ba36..9f6658e96d01 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -2,6 +2,7 @@ "packages/google-ai-generativelanguage": "0.3.5", "packages/google-analytics-admin": "0.22.1", "packages/google-analytics-data": "0.18.1", + "packages/google-apps-meet": "0.0.0", "packages/google-apps-script-type": "0.3.5", "packages/google-area120-tables": "0.11.5", "packages/google-cloud-access-approval": "1.12.0", diff --git a/release-please-config.json b/release-please-config.json index 6485253ba0fe..0bbbc1a98a0e 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -58,6 +58,21 @@ ], "release-type": "python" }, + "packages/google-apps-meet": { + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": true, + "component": "google-apps-meet", + "extra-files": [ + "google/apps/meet/gapic_version.py", + "google/apps/meet_v2beta/gapic_version.py", + { + "jsonpath": "$.clientLibrary.version", + "path": "samples/generated_samples/snippet_metadata_google.apps.meet.v2beta.json", + "type": "json" + } + ], + "release-type": "python" + }, "packages/google-apps-script-type": { "bump-minor-pre-major": true, "bump-patch-for-minor-pre-major": true, From b70cbfa873a5b9d6bf75623ffeb89cf7a42a6554 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 21:45:16 +0000 Subject: [PATCH 05/80] chore: release main (#12099) :robot: I have created a release *beep* *boop* ---
google-apps-meet: 0.1.0 ## 0.1.0 (2023-12-07) ### Features * add initial files for google.apps.meet.v2beta ([#12100](https://github.com/googleapis/google-cloud-python/issues/12100)) ([d99f5b0](https://github.com/googleapis/google-cloud-python/commit/d99f5b0ec5dcaa254bfa30dbf0495063a7a82374))
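As a quick orientation to the newly released package, here is a minimal sketch of creating a meeting space with the v2beta client that the generated tests above exercise. It is an illustration only: it assumes credentials are already configured in the environment, and that the generated surface exposes `SpacesServiceClient`, `CreateSpaceRequest`, `Space`, and a `meeting_uri` field, as the generated tests and samples suggest.

```python
from google.apps import meet_v2beta


def create_meeting_space() -> meet_v2beta.Space:
    # Assumes credentials are picked up from the environment.
    client = meet_v2beta.SpacesServiceClient()

    # An empty Space lets the service fill in defaults such as the meeting URI.
    request = meet_v2beta.CreateSpaceRequest(space=meet_v2beta.Space())

    space = client.create_space(request=request)
    print(f"Created {space.name} -> {space.meeting_uri}")
    return space


if __name__ == "__main__":
    create_meeting_space()
```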
google-cloud-advisorynotifications: 0.3.3 ## [0.3.3](https://github.com/googleapis/google-cloud-python/compare/google-cloud-advisorynotifications-v0.3.2...google-cloud-advisorynotifications-v0.3.3) (2023-12-07) ### Features * Adding GetNotification and ListNotifications methods for notifications parented at the project level ([d250ab3](https://github.com/googleapis/google-cloud-python/commit/d250ab3f1c9ed29a530360899445f2d8714fc157)) * Adding project level methods to advisorynotifications.googleapis.com ([d250ab3](https://github.com/googleapis/google-cloud-python/commit/d250ab3f1c9ed29a530360899445f2d8714fc157)) ### Documentation * Adding docs for new project level methods ([d250ab3](https://github.com/googleapis/google-cloud-python/commit/d250ab3f1c9ed29a530360899445f2d8714fc157))
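Per the entries above, the list and get calls now accept a `projects/{project}/locations/{location}` parent in addition to the organization form. A minimal sketch of listing notifications under a project-level parent, assuming default credentials; the project ID and the `"global"` location are placeholder values for illustration:

```python
from google.cloud import advisorynotifications_v1


def list_project_notifications(project_id: str, location: str = "global") -> None:
    # The "global" default location is an assumption for illustration;
    # substitute values appropriate to your setup.
    client = advisorynotifications_v1.AdvisoryNotificationsServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    request = advisorynotifications_v1.ListNotificationsRequest(parent=parent)
    for notification in client.list_notifications(request=request):
        print(notification.name)


if __name__ == "__main__":
    list_project_notifications("my-example-project")
```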
google-maps-places: 0.1.6 ## [0.1.6](https://github.com/googleapis/google-cloud-python/compare/google-maps-places-v0.1.5...google-maps-places-v0.1.6) (2023-12-07) ### Features * Add new primary type fields ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) * Add new short formatted address field ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) * Add new wheelchair accessibility fields ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) ### Documentation * Change comments for some fields in Places API ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736))
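To show where the new fields surface in the Python client, here is a hedged text-search sketch. The field-mask paths and the snake_case response attributes (`short_formatted_address`, `primary_type`, `accessibility_options`) are assumptions inferred from the feature names above; the pattern of passing `x-goog-fieldmask` as request metadata follows the Places API convention.

```python
from google.maps import places_v1


def search_cafes(query: str = "coffee near Mountain View") -> None:
    client = places_v1.PlacesClient()

    request = places_v1.SearchTextRequest(text_query=query)

    # The Places API requires a field mask; these paths target the fields
    # introduced in 0.1.6 and are assumptions based on the changelog above.
    field_mask = ",".join(
        [
            "places.displayName",
            "places.shortFormattedAddress",
            "places.primaryType",
            "places.accessibilityOptions",
        ]
    )

    response = client.search_text(
        request=request,
        metadata=[("x-goog-fieldmask", field_mask)],
    )
    for place in response.places:
        print(place.primary_type, "-", place.short_formatted_address)


if __name__ == "__main__":
    search_cafes()
```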
--- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 6 +++--- packages/google-apps-meet/CHANGELOG.md | 11 ++++++++++- .../google/apps/meet/gapic_version.py | 2 +- .../google/apps/meet_v2beta/gapic_version.py | 2 +- .../CHANGELOG.md | 13 +++++++++++++ .../cloud/advisorynotifications/gapic_version.py | 2 +- .../advisorynotifications_v1/gapic_version.py | 2 +- ...data_google.cloud.advisorynotifications.v1.json | 2 +- packages/google-maps-places/CHANGELOG.md | 14 ++++++++++++++ .../google/maps/places/gapic_version.py | 2 +- .../google/maps/places_v1/gapic_version.py | 2 +- .../snippet_metadata_google.maps.places.v1.json | 2 +- 12 files changed, 48 insertions(+), 12 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9f6658e96d01..9a267657b196 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -2,11 +2,11 @@ "packages/google-ai-generativelanguage": "0.3.5", "packages/google-analytics-admin": "0.22.1", "packages/google-analytics-data": "0.18.1", - "packages/google-apps-meet": "0.0.0", + "packages/google-apps-meet": "0.1.0", "packages/google-apps-script-type": "0.3.5", "packages/google-area120-tables": "0.11.5", "packages/google-cloud-access-approval": "1.12.0", - "packages/google-cloud-advisorynotifications": "0.3.2", + "packages/google-cloud-advisorynotifications": "0.3.3", "packages/google-cloud-alloydb": "0.3.5", "packages/google-cloud-alloydb-connectors": "0.1.1", "packages/google-cloud-api-gateway": "1.8.0", @@ -163,7 +163,7 @@ "packages/google-maps-fleetengine": "0.1.2", "packages/google-maps-fleetengine-delivery": "0.1.2", "packages/google-maps-mapsplatformdatasets": "0.3.3", - "packages/google-maps-places": "0.1.5", + "packages/google-maps-places": "0.1.6", "packages/google-maps-routing": "0.6.3", "packages/google-shopping-merchant-inventories": "0.1.1", "packages/google-shopping-merchant-reports": "0.1.1", diff --git a/packages/google-apps-meet/CHANGELOG.md b/packages/google-apps-meet/CHANGELOG.md index 5ddad421e08f..6b2284406753 100644 --- a/packages/google-apps-meet/CHANGELOG.md +++ b/packages/google-apps-meet/CHANGELOG.md @@ -1 +1,10 @@ -# Changelog \ No newline at end of file +# Changelog + +## 0.1.0 (2023-12-07) + + +### Features + +* add initial files for google.apps.meet.v2beta ([#12100](https://github.com/googleapis/google-cloud-python/issues/12100)) ([d99f5b0](https://github.com/googleapis/google-cloud-python/commit/d99f5b0ec5dcaa254bfa30dbf0495063a7a82374)) + +## Changelog diff --git a/packages/google-apps-meet/google/apps/meet/gapic_version.py b/packages/google-apps-meet/google/apps/meet/gapic_version.py index 360a0d13ebdd..a7d39deb7a45 100644 --- a/packages/google-apps-meet/google/apps/meet/gapic_version.py +++ b/packages/google-apps-meet/google/apps/meet/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.1.0" # {x-release-please-version} diff --git a/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py b/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py index 360a0d13ebdd..a7d39deb7a45 100644 --- a/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py +++ b/packages/google-apps-meet/google/apps/meet_v2beta/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.1.0" # {x-release-please-version} diff --git a/packages/google-cloud-advisorynotifications/CHANGELOG.md b/packages/google-cloud-advisorynotifications/CHANGELOG.md index b69eab338f39..adc22ece4b3c 100644 --- a/packages/google-cloud-advisorynotifications/CHANGELOG.md +++ b/packages/google-cloud-advisorynotifications/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [0.3.3](https://github.com/googleapis/google-cloud-python/compare/google-cloud-advisorynotifications-v0.3.2...google-cloud-advisorynotifications-v0.3.3) (2023-12-07) + + +### Features + +* Adding GetNotification and ListNotifications methods for notifications parented at the project level ([d250ab3](https://github.com/googleapis/google-cloud-python/commit/d250ab3f1c9ed29a530360899445f2d8714fc157)) +* Adding project level methods to advisorynotifications.googleapis.com ([d250ab3](https://github.com/googleapis/google-cloud-python/commit/d250ab3f1c9ed29a530360899445f2d8714fc157)) + + +### Documentation + +* Adding docs for new project level methods ([d250ab3](https://github.com/googleapis/google-cloud-python/commit/d250ab3f1c9ed29a530360899445f2d8714fc157)) + ## [0.3.2](https://github.com/googleapis/google-cloud-python/compare/google-cloud-advisorynotifications-v0.3.1...google-cloud-advisorynotifications-v0.3.2) (2023-12-07) diff --git a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications/gapic_version.py b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications/gapic_version.py index 78e859312100..a01b131351cc 100644 --- a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications/gapic_version.py +++ b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.3.2" # {x-release-please-version} +__version__ = "0.3.3" # {x-release-please-version} diff --git a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/gapic_version.py b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/gapic_version.py index 78e859312100..a01b131351cc 100644 --- a/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/gapic_version.py +++ b/packages/google-cloud-advisorynotifications/google/cloud/advisorynotifications_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.3.2" # {x-release-please-version} +__version__ = "0.3.3" # {x-release-please-version} diff --git a/packages/google-cloud-advisorynotifications/samples/generated_samples/snippet_metadata_google.cloud.advisorynotifications.v1.json b/packages/google-cloud-advisorynotifications/samples/generated_samples/snippet_metadata_google.cloud.advisorynotifications.v1.json index 8d25b4a76abe..eb033ed94522 100644 --- a/packages/google-cloud-advisorynotifications/samples/generated_samples/snippet_metadata_google.cloud.advisorynotifications.v1.json +++ b/packages/google-cloud-advisorynotifications/samples/generated_samples/snippet_metadata_google.cloud.advisorynotifications.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-advisorynotifications", - "version": "0.3.2" + "version": "0.3.3" }, "snippets": [ { diff --git a/packages/google-maps-places/CHANGELOG.md b/packages/google-maps-places/CHANGELOG.md index f7bf9b4937b3..1f07bfd90652 100644 --- a/packages/google-maps-places/CHANGELOG.md +++ b/packages/google-maps-places/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## [0.1.6](https://github.com/googleapis/google-cloud-python/compare/google-maps-places-v0.1.5...google-maps-places-v0.1.6) (2023-12-07) + + +### Features + +* Add new primary type fields ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) +* Add new short formatted address field ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) +* Add new wheelchair accessibility fields ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) + + +### Documentation + +* Change comments for some fields in Places API ([a74938f](https://github.com/googleapis/google-cloud-python/commit/a74938fa2ed19348d703d23ffb13545423e8b736)) + ## [0.1.5](https://github.com/googleapis/google-cloud-python/compare/google-maps-places-v0.1.4...google-maps-places-v0.1.5) (2023-12-07) diff --git a/packages/google-maps-places/google/maps/places/gapic_version.py b/packages/google-maps-places/google/maps/places/gapic_version.py index 360a0d13ebdd..60402a4567d1 100644 --- a/packages/google-maps-places/google/maps/places/gapic_version.py +++ b/packages/google-maps-places/google/maps/places/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.1.6" # {x-release-please-version} diff --git a/packages/google-maps-places/google/maps/places_v1/gapic_version.py b/packages/google-maps-places/google/maps/places_v1/gapic_version.py index 360a0d13ebdd..60402a4567d1 100644 --- a/packages/google-maps-places/google/maps/places_v1/gapic_version.py +++ b/packages/google-maps-places/google/maps/places_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.1.6" # {x-release-please-version} diff --git a/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json b/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json index 6444e295ff29..2379a066bd9f 100644 --- a/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json +++ b/packages/google-maps-places/samples/generated_samples/snippet_metadata_google.maps.places.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-maps-places", - "version": "0.1.0" + "version": "0.1.6" }, "snippets": [ { From 182c4cf16e7e1eef2819396a5a0b590a81af6a58 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 9 Dec 2023 08:14:40 -0500 Subject: [PATCH 06/80] feat: [google-analytics-data] add `CreateAudienceExport`, `QueryAudienceExport`, `GetAudienceExport`, `ListAudienceExports` methods to the Data API v1 beta (#12105) - [ ] Regenerate this pull request now. BEGIN_COMMIT_OVERRIDE feat: [google-analytics-data] add `CreateAudienceExport`, `QueryAudienceExport`, `GetAudienceExport`, `ListAudienceExports` methods to the Data API v1 beta feat: add `sampling_metadatas` field to `ResponseMetaData` feat: add `SamplingMetadata`, `AudienceExport`, `AudienceExportMetadata`, `AudienceDimensionValue` types fix: add `optional` label to `consumed`, `remaining` fields of the `QuotaStatus` type docs: updated comments END_COMMIT_OVERRIDE PiperOrigin-RevId: 589214160 Source-Link: https://github.com/googleapis/googleapis/commit/c3ee218d294bf315130561d7a947b06d2278f018 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2ae3038701f796a5c83ff4f12d41c6ab828bb9a7 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWFuYWx5dGljcy1kYXRhLy5Pd2xCb3QueWFtbCIsImgiOiIyYWUzMDM4NzAxZjc5NmE1YzgzZmY0ZjEyZDQxYzZhYjgyOGJiOWE3In0= --------- Co-authored-by: Owl Bot --- .../docs/data_v1beta/beta_analytics_data.rst | 4 + .../google/analytics/data/__init__.py | 24 + .../google/analytics/data/gapic_version.py | 2 +- .../analytics/data_v1alpha/gapic_version.py | 2 +- .../google/analytics/data_v1beta/__init__.py | 24 + .../analytics/data_v1beta/gapic_metadata.json | 60 + .../analytics/data_v1beta/gapic_version.py | 2 +- .../beta_analytics_data/async_client.py | 553 ++- .../services/beta_analytics_data/client.py | 569 ++- .../services/beta_analytics_data/pagers.py | 159 + .../beta_analytics_data/transports/base.py | 73 +- .../beta_analytics_data/transports/grpc.py | 206 +- .../transports/grpc_asyncio.py | 210 +- .../beta_analytics_data/transports/rest.py | 600 ++- .../analytics/data_v1beta/types/__init__.py | 24 + .../data_v1beta/types/analytics_data_api.py | 421 ++- .../analytics/data_v1beta/types/data.py | 83 +- ...ytics_data_create_audience_export_async.py | 60 + ...lytics_data_create_audience_export_sync.py | 60 + ...nalytics_data_get_audience_export_async.py | 52 + ...analytics_data_get_audience_export_sync.py | 52 + ...lytics_data_list_audience_exports_async.py | 53 + ...alytics_data_list_audience_exports_sync.py | 53 + ...lytics_data_query_audience_export_async.py | 52 + ...alytics_data_query_audience_export_sync.py | 52 + ...etadata_google.analytics.data.v1alpha.json | 2 +- ...metadata_google.analytics.data.v1beta.json | 680 +++- .../scripts/fixup_data_v1beta_keywords.py | 4 + .../data_v1beta/test_beta_analytics_data.py | 3319 +++++++++++++++-- 29 files changed, 7053 insertions(+), 402 
deletions(-) create mode 100644 packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/pagers.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_async.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_sync.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_async.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_sync.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_async.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_sync.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_async.py create mode 100644 packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_sync.py diff --git a/packages/google-analytics-data/docs/data_v1beta/beta_analytics_data.rst b/packages/google-analytics-data/docs/data_v1beta/beta_analytics_data.rst index 7bcaa51b6b5e..b5c0de656c3f 100644 --- a/packages/google-analytics-data/docs/data_v1beta/beta_analytics_data.rst +++ b/packages/google-analytics-data/docs/data_v1beta/beta_analytics_data.rst @@ -4,3 +4,7 @@ BetaAnalyticsData .. automodule:: google.analytics.data_v1beta.services.beta_analytics_data :members: :inherited-members: + +.. 
automodule:: google.analytics.data_v1beta.services.beta_analytics_data.pagers + :members: + :inherited-members: diff --git a/packages/google-analytics-data/google/analytics/data/__init__.py b/packages/google-analytics-data/google/analytics/data/__init__.py index f86a037d1df2..6415f667e119 100644 --- a/packages/google-analytics-data/google/analytics/data/__init__.py +++ b/packages/google-analytics-data/google/analytics/data/__init__.py @@ -25,14 +25,25 @@ BetaAnalyticsDataClient, ) from google.analytics.data_v1beta.types.analytics_data_api import ( + AudienceDimension, + AudienceDimensionValue, + AudienceExport, + AudienceExportMetadata, + AudienceRow, BatchRunPivotReportsRequest, BatchRunPivotReportsResponse, BatchRunReportsRequest, BatchRunReportsResponse, CheckCompatibilityRequest, CheckCompatibilityResponse, + CreateAudienceExportRequest, + GetAudienceExportRequest, GetMetadataRequest, + ListAudienceExportsRequest, + ListAudienceExportsResponse, Metadata, + QueryAudienceExportRequest, + QueryAudienceExportResponse, RunPivotReportRequest, RunPivotReportResponse, RunRealtimeReportRequest, @@ -74,19 +85,31 @@ ResponseMetaData, RestrictedMetricType, Row, + SamplingMetadata, ) __all__ = ( "BetaAnalyticsDataClient", "BetaAnalyticsDataAsyncClient", + "AudienceDimension", + "AudienceDimensionValue", + "AudienceExport", + "AudienceExportMetadata", + "AudienceRow", "BatchRunPivotReportsRequest", "BatchRunPivotReportsResponse", "BatchRunReportsRequest", "BatchRunReportsResponse", "CheckCompatibilityRequest", "CheckCompatibilityResponse", + "CreateAudienceExportRequest", + "GetAudienceExportRequest", "GetMetadataRequest", + "ListAudienceExportsRequest", + "ListAudienceExportsResponse", "Metadata", + "QueryAudienceExportRequest", + "QueryAudienceExportResponse", "RunPivotReportRequest", "RunPivotReportResponse", "RunRealtimeReportRequest", @@ -122,6 +145,7 @@ "QuotaStatus", "ResponseMetaData", "Row", + "SamplingMetadata", "Compatibility", "MetricAggregation", "MetricType", diff --git a/packages/google-analytics-data/google/analytics/data/gapic_version.py b/packages/google-analytics-data/google/analytics/data/gapic_version.py index 10aeefebafee..360a0d13ebdd 100644 --- a/packages/google-analytics-data/google/analytics/data/gapic_version.py +++ b/packages/google-analytics-data/google/analytics/data/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.18.1" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py b/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py index 10aeefebafee..360a0d13ebdd 100644 --- a/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py +++ b/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.18.1" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/__init__.py b/packages/google-analytics-data/google/analytics/data_v1beta/__init__.py index 8ace2e93547f..037f237e3ad6 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/__init__.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/__init__.py @@ -23,14 +23,25 @@ BetaAnalyticsDataClient, ) from .types.analytics_data_api import ( + AudienceDimension, + AudienceDimensionValue, + AudienceExport, + AudienceExportMetadata, + AudienceRow, BatchRunPivotReportsRequest, BatchRunPivotReportsResponse, BatchRunReportsRequest, BatchRunReportsResponse, CheckCompatibilityRequest, CheckCompatibilityResponse, + CreateAudienceExportRequest, + GetAudienceExportRequest, GetMetadataRequest, + ListAudienceExportsRequest, + ListAudienceExportsResponse, Metadata, + QueryAudienceExportRequest, + QueryAudienceExportResponse, RunPivotReportRequest, RunPivotReportResponse, RunRealtimeReportRequest, @@ -72,10 +83,16 @@ ResponseMetaData, RestrictedMetricType, Row, + SamplingMetadata, ) __all__ = ( "BetaAnalyticsDataAsyncClient", + "AudienceDimension", + "AudienceDimensionValue", + "AudienceExport", + "AudienceExportMetadata", + "AudienceRow", "BatchRunPivotReportsRequest", "BatchRunPivotReportsResponse", "BatchRunReportsRequest", @@ -88,6 +105,7 @@ "CohortSpec", "CohortsRange", "Compatibility", + "CreateAudienceExportRequest", "DateRange", "Dimension", "DimensionCompatibility", @@ -98,7 +116,10 @@ "Filter", "FilterExpression", "FilterExpressionList", + "GetAudienceExportRequest", "GetMetadataRequest", + "ListAudienceExportsRequest", + "ListAudienceExportsResponse", "Metadata", "Metric", "MetricAggregation", @@ -114,6 +135,8 @@ "PivotDimensionHeader", "PivotHeader", "PropertyQuota", + "QueryAudienceExportRequest", + "QueryAudienceExportResponse", "QuotaStatus", "ResponseMetaData", "RestrictedMetricType", @@ -124,4 +147,5 @@ "RunRealtimeReportResponse", "RunReportRequest", "RunReportResponse", + "SamplingMetadata", ) diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/gapic_metadata.json b/packages/google-analytics-data/google/analytics/data_v1beta/gapic_metadata.json index e1e3750c4252..a00d9bb919b6 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/gapic_metadata.json +++ b/packages/google-analytics-data/google/analytics/data_v1beta/gapic_metadata.json @@ -25,11 +25,31 @@ "check_compatibility" ] }, + "CreateAudienceExport": { + "methods": [ + "create_audience_export" + ] + }, + "GetAudienceExport": { + "methods": [ + "get_audience_export" + ] + }, "GetMetadata": { "methods": [ "get_metadata" ] }, + "ListAudienceExports": { + "methods": [ + "list_audience_exports" + ] + }, + "QueryAudienceExport": { + "methods": [ + "query_audience_export" + ] + }, "RunPivotReport": { "methods": [ "run_pivot_report" @@ -65,11 +85,31 @@ "check_compatibility" ] }, + "CreateAudienceExport": { + "methods": [ + "create_audience_export" + ] + }, + "GetAudienceExport": { + "methods": [ + "get_audience_export" + ] + }, "GetMetadata": { "methods": [ "get_metadata" ] }, + "ListAudienceExports": { + "methods": [ + "list_audience_exports" + ] + }, + "QueryAudienceExport": { + "methods": [ + "query_audience_export" + ] + }, "RunPivotReport": { "methods": [ "run_pivot_report" @@ -105,11 +145,31 @@ "check_compatibility" ] }, + "CreateAudienceExport": { + "methods": [ + "create_audience_export" + 
] + }, + "GetAudienceExport": { + "methods": [ + "get_audience_export" + ] + }, "GetMetadata": { "methods": [ "get_metadata" ] }, + "ListAudienceExports": { + "methods": [ + "list_audience_exports" + ] + }, + "QueryAudienceExport": { + "methods": [ + "query_audience_export" + ] + }, "RunPivotReport": { "methods": [ "run_pivot_report" diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py b/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py index 10aeefebafee..360a0d13ebdd 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.18.1" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/async_client.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/async_client.py index 704b9e3ecad7..0d923673401f 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/async_client.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/async_client.py @@ -42,6 +42,12 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + +from google.analytics.data_v1beta.services.beta_analytics_data import pagers from google.analytics.data_v1beta.types import analytics_data_api, data from .client import BetaAnalyticsDataClient @@ -57,6 +63,10 @@ class BetaAnalyticsDataAsyncClient: DEFAULT_ENDPOINT = BetaAnalyticsDataClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BetaAnalyticsDataClient.DEFAULT_MTLS_ENDPOINT + audience_export_path = staticmethod(BetaAnalyticsDataClient.audience_export_path) + parse_audience_export_path = staticmethod( + BetaAnalyticsDataClient.parse_audience_export_path + ) metadata_path = staticmethod(BetaAnalyticsDataClient.metadata_path) parse_metadata_path = staticmethod(BetaAnalyticsDataClient.parse_metadata_path) common_billing_account_path = staticmethod( @@ -626,8 +636,9 @@ async def sample_get_metadata(): Returns: google.analytics.data_v1beta.types.Metadata: - The dimensions and metrics currently - accepted in reporting methods. + The dimensions, metrics and + comparisons currently accepted in + reporting methods. """ # Create or coerce a protobuf request object. @@ -859,6 +870,544 @@ async def sample_check_compatibility(): # Done; return the response. return response + async def create_audience_export( + self, + request: Optional[ + Union[analytics_data_api.CreateAudienceExportRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + audience_export: Optional[analytics_data_api.AudienceExport] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an audience export for later retrieval. 
This method + quickly returns the audience export's resource name and + initiates a long running asynchronous request to form an + audience export. To export the users in an audience export, + first create the audience export through this method and then + send the audience resource name to the ``QueryAudienceExport`` + method. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + An audience export is a snapshot of the users currently in the + audience at the time of audience export creation. Creating + audience exports for one audience on different days will return + different results as users enter and exit the audience. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + Audience exports contain the users in each audience. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + async def sample_create_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + audience_export = data_v1beta.AudienceExport() + audience_export.audience = "audience_value" + + request = data_v1beta.CreateAudienceExportRequest( + parent="parent_value", + audience_export=audience_export, + ) + + # Make the request + operation = client.create_audience_export(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.analytics.data_v1beta.types.CreateAudienceExportRequest, dict]]): + The request object. A request to create a new audience + export. + parent (:class:`str`): + Required. The parent resource where this audience export + will be created. Format: ``properties/{property}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audience_export (:class:`google.analytics.data_v1beta.types.AudienceExport`): + Required. The audience export to + create. + + This corresponds to the ``audience_export`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.analytics.data_v1beta.types.AudienceExport` An audience export is a list of users in an audience at the time of the + list's creation. One audience may have multiple + audience exports created for different days. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, audience_export]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = analytics_data_api.CreateAudienceExportRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if audience_export is not None: + request.audience_export = audience_export + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_audience_export, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + analytics_data_api.AudienceExport, + metadata_type=analytics_data_api.AudienceExportMetadata, + ) + + # Done; return the response. + return response + + async def query_audience_export( + self, + request: Optional[ + Union[analytics_data_api.QueryAudienceExportRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.QueryAudienceExportResponse: + r"""Retrieves an audience export of users. After creating an + audience, the users are not immediately available for exporting. + First, a request to ``CreateAudienceExport`` is necessary to + create an audience export of users, and then second, this method + is used to retrieve the users in the audience export. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + async def sample_query_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + request = data_v1beta.QueryAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = await client.query_audience_export(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.analytics.data_v1beta.types.QueryAudienceExportRequest, dict]]): + The request object. A request to list users in an + audience export. + name (:class:`str`): + Required. The name of the audience export to retrieve + users from. Format: + ``properties/{property}/audienceExports/{audience_export}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.analytics.data_v1beta.types.QueryAudienceExportResponse: + A list of users in an audience + export. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = analytics_data_api.QueryAudienceExportRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_audience_export, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_audience_export( + self, + request: Optional[ + Union[analytics_data_api.GetAudienceExportRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.AudienceExport: + r"""Gets configuration metadata about a specific audience export. + This method can be used to understand an audience export after + it has been created. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. 
To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + async def sample_get_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + request = data_v1beta.GetAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = await client.get_audience_export(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.analytics.data_v1beta.types.GetAudienceExportRequest, dict]]): + The request object. A request to retrieve configuration + metadata about a specific audience + export. + name (:class:`str`): + Required. The audience export resource name. Format: + ``properties/{property}/audienceExports/{audience_export}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.analytics.data_v1beta.types.AudienceExport: + An audience export is a list of users + in an audience at the time of the list's + creation. One audience may have multiple + audience exports created for different + days. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = analytics_data_api.GetAudienceExportRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_audience_export, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_audience_exports( + self, + request: Optional[ + Union[analytics_data_api.ListAudienceExportsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAudienceExportsAsyncPager: + r"""Lists all audience exports for a property. This method can be + used for you to find and reuse existing audience exports rather + than creating unnecessary new audience exports. The same + audience can have multiple audience exports that represent the + export of users that were in an audience on different days. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + async def sample_list_audience_exports(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + request = data_v1beta.ListAudienceExportsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_audience_exports(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.analytics.data_v1beta.types.ListAudienceExportsRequest, dict]]): + The request object. A request to list all audience + exports for a property. + parent (:class:`str`): + Required. All audience exports for this property will be + listed in the response. Format: + ``properties/{property}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.analytics.data_v1beta.services.beta_analytics_data.pagers.ListAudienceExportsAsyncPager: + A list of all audience exports for a + property. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = analytics_data_api.ListAudienceExportsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_audience_exports, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAudienceExportsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self) -> "BetaAnalyticsDataAsyncClient": return self diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/client.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/client.py index b0659c09a374..64d8e9aa9fd8 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/client.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/client.py @@ -46,6 +46,12 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + +from google.analytics.data_v1beta.services.beta_analytics_data import pagers from google.analytics.data_v1beta.types import analytics_data_api, data from .transports.base import DEFAULT_CLIENT_INFO, BetaAnalyticsDataTransport @@ -176,6 +182,26 @@ def transport(self) -> BetaAnalyticsDataTransport: """ return self._transport + @staticmethod + def audience_export_path( + property: str, + audience_export: str, + ) -> str: + """Returns a fully-qualified audience_export string.""" + return "properties/{property}/audienceExports/{audience_export}".format( + property=property, + audience_export=audience_export, + ) + + @staticmethod + def parse_audience_export_path(path: str) -> Dict[str, str]: + """Parses a audience_export path into its component segments.""" + m = re.match( + r"^properties/(?P.+?)/audienceExports/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def metadata_path( property: str, @@ -852,8 +878,9 @@ def sample_get_metadata(): Returns: google.analytics.data_v1beta.types.Metadata: - The dimensions and metrics currently - accepted in reporting methods. + The dimensions, metrics and + comparisons currently accepted in + reporting methods. """ # Create or coerce a protobuf request object. @@ -1087,6 +1114,544 @@ def sample_check_compatibility(): # Done; return the response. 
return response + def create_audience_export( + self, + request: Optional[ + Union[analytics_data_api.CreateAudienceExportRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + audience_export: Optional[analytics_data_api.AudienceExport] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates an audience export for later retrieval. This method + quickly returns the audience export's resource name and + initiates a long running asynchronous request to form an + audience export. To export the users in an audience export, + first create the audience export through this method and then + send the audience resource name to the ``QueryAudienceExport`` + method. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + An audience export is a snapshot of the users currently in the + audience at the time of audience export creation. Creating + audience exports for one audience on different days will return + different results as users enter and exit the audience. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + Audience exports contain the users in each audience. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + def sample_create_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + audience_export = data_v1beta.AudienceExport() + audience_export.audience = "audience_value" + + request = data_v1beta.CreateAudienceExportRequest( + parent="parent_value", + audience_export=audience_export, + ) + + # Make the request + operation = client.create_audience_export(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.analytics.data_v1beta.types.CreateAudienceExportRequest, dict]): + The request object. A request to create a new audience + export. + parent (str): + Required. The parent resource where this audience export + will be created. Format: ``properties/{property}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audience_export (google.analytics.data_v1beta.types.AudienceExport): + Required. The audience export to + create. + + This corresponds to the ``audience_export`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.analytics.data_v1beta.types.AudienceExport` An audience export is a list of users in an audience at the time of the + list's creation. One audience may have multiple + audience exports created for different days. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, audience_export]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a analytics_data_api.CreateAudienceExportRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, analytics_data_api.CreateAudienceExportRequest): + request = analytics_data_api.CreateAudienceExportRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if audience_export is not None: + request.audience_export = audience_export + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_audience_export] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + analytics_data_api.AudienceExport, + metadata_type=analytics_data_api.AudienceExportMetadata, + ) + + # Done; return the response. + return response + + def query_audience_export( + self, + request: Optional[ + Union[analytics_data_api.QueryAudienceExportRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.QueryAudienceExportResponse: + r"""Retrieves an audience export of users. After creating an + audience, the users are not immediately available for exporting. + First, a request to ``CreateAudienceExport`` is necessary to + create an audience export of users, and then second, this method + is used to retrieve the users in the audience export. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. 
To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + def sample_query_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + request = data_v1beta.QueryAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = client.query_audience_export(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.analytics.data_v1beta.types.QueryAudienceExportRequest, dict]): + The request object. A request to list users in an + audience export. + name (str): + Required. The name of the audience export to retrieve + users from. Format: + ``properties/{property}/audienceExports/{audience_export}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.analytics.data_v1beta.types.QueryAudienceExportResponse: + A list of users in an audience + export. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a analytics_data_api.QueryAudienceExportRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, analytics_data_api.QueryAudienceExportRequest): + request = analytics_data_api.QueryAudienceExportRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_audience_export] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_audience_export( + self, + request: Optional[ + Union[analytics_data_api.GetAudienceExportRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.AudienceExport: + r"""Gets configuration metadata about a specific audience export. + This method can be used to understand an audience export after + it has been created. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + def sample_get_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + request = data_v1beta.GetAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = client.get_audience_export(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.analytics.data_v1beta.types.GetAudienceExportRequest, dict]): + The request object. A request to retrieve configuration + metadata about a specific audience + export. + name (str): + Required. The audience export resource name. Format: + ``properties/{property}/audienceExports/{audience_export}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.analytics.data_v1beta.types.AudienceExport: + An audience export is a list of users + in an audience at the time of the list's + creation. One audience may have multiple + audience exports created for different + days. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a analytics_data_api.GetAudienceExportRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, analytics_data_api.GetAudienceExportRequest): + request = analytics_data_api.GetAudienceExportRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_audience_export] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_audience_exports( + self, + request: Optional[ + Union[analytics_data_api.ListAudienceExportsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAudienceExportsPager: + r"""Lists all audience exports for a property. This method can be + used for you to find and reuse existing audience exports rather + than creating unnecessary new audience exports. The same + audience can have multiple audience exports that represent the + export of users that were in an audience on different days. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.analytics import data_v1beta + + def sample_list_audience_exports(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + request = data_v1beta.ListAudienceExportsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_audience_exports(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.analytics.data_v1beta.types.ListAudienceExportsRequest, dict]): + The request object. A request to list all audience + exports for a property. + parent (str): + Required. All audience exports for this property will be + listed in the response. Format: + ``properties/{property}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.analytics.data_v1beta.services.beta_analytics_data.pagers.ListAudienceExportsPager: + A list of all audience exports for a + property. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a analytics_data_api.ListAudienceExportsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, analytics_data_api.ListAudienceExportsRequest): + request = analytics_data_api.ListAudienceExportsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_audience_exports] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAudienceExportsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "BetaAnalyticsDataClient": return self diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/pagers.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/pagers.py new file mode 100644 index 000000000000..1c7a8807e581 --- /dev/null +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/pagers.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, +) + +from google.analytics.data_v1beta.types import analytics_data_api + + +class ListAudienceExportsPager: + """A pager for iterating through ``list_audience_exports`` requests. + + This class thinly wraps an initial + :class:`google.analytics.data_v1beta.types.ListAudienceExportsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``audience_exports`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAudienceExports`` requests and continue to iterate + through the ``audience_exports`` field on the + corresponding responses. 
+ + All the usual :class:`google.analytics.data_v1beta.types.ListAudienceExportsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., analytics_data_api.ListAudienceExportsResponse], + request: analytics_data_api.ListAudienceExportsRequest, + response: analytics_data_api.ListAudienceExportsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.analytics.data_v1beta.types.ListAudienceExportsRequest): + The initial request object. + response (google.analytics.data_v1beta.types.ListAudienceExportsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = analytics_data_api.ListAudienceExportsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[analytics_data_api.ListAudienceExportsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[analytics_data_api.AudienceExport]: + for page in self.pages: + yield from page.audience_exports + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAudienceExportsAsyncPager: + """A pager for iterating through ``list_audience_exports`` requests. + + This class thinly wraps an initial + :class:`google.analytics.data_v1beta.types.ListAudienceExportsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``audience_exports`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAudienceExports`` requests and continue to iterate + through the ``audience_exports`` field on the + corresponding responses. + + All the usual :class:`google.analytics.data_v1beta.types.ListAudienceExportsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[analytics_data_api.ListAudienceExportsResponse] + ], + request: analytics_data_api.ListAudienceExportsRequest, + response: analytics_data_api.ListAudienceExportsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.analytics.data_v1beta.types.ListAudienceExportsRequest): + The initial request object. + response (google.analytics.data_v1beta.types.ListAudienceExportsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = analytics_data_api.ListAudienceExportsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[analytics_data_api.ListAudienceExportsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[analytics_data_api.AudienceExport]: + async def async_generator(): + async for page in self.pages: + for response in page.audience_exports: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/base.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/base.py index 7a50afa7d266..9f76caeeca5a 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/base.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/base.py @@ -18,10 +18,11 @@ import google.api_core from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 +from google.api_core import gapic_v1, operations_v1 from google.api_core import retry as retries import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account # type: ignore from google.analytics.data_v1beta import gapic_version as package_version @@ -160,6 +161,26 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_audience_export: gapic_v1.method.wrap_method( + self.create_audience_export, + default_timeout=None, + client_info=client_info, + ), + self.query_audience_export: gapic_v1.method.wrap_method( + self.query_audience_export, + default_timeout=None, + client_info=client_info, + ), + self.get_audience_export: gapic_v1.method.wrap_method( + self.get_audience_export, + default_timeout=None, + client_info=client_info, + ), + self.list_audience_exports: gapic_v1.method.wrap_method( + self.list_audience_exports, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -171,6 +192,11 @@ def close(self): """ raise NotImplementedError() + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + @property def run_report( self, @@ -252,6 +278,51 @@ def check_compatibility( ]: raise NotImplementedError() + @property + def create_audience_export( + self, + ) -> Callable[ + [analytics_data_api.CreateAudienceExportRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def query_audience_export( + self, + ) -> Callable[ + [analytics_data_api.QueryAudienceExportRequest], + Union[ + analytics_data_api.QueryAudienceExportResponse, + Awaitable[analytics_data_api.QueryAudienceExportResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_audience_export( + self, + ) -> Callable[ + 
[analytics_data_api.GetAudienceExportRequest], + Union[ + analytics_data_api.AudienceExport, + Awaitable[analytics_data_api.AudienceExport], + ], + ]: + raise NotImplementedError() + + @property + def list_audience_exports( + self, + ) -> Callable[ + [analytics_data_api.ListAudienceExportsRequest], + Union[ + analytics_data_api.ListAudienceExportsResponse, + Awaitable[analytics_data_api.ListAudienceExportsResponse], + ], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc.py index dcf17bd1e155..4237b8da0190 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc.py @@ -16,10 +16,11 @@ from typing import Callable, Dict, Optional, Sequence, Tuple, Union import warnings -from google.api_core import gapic_v1, grpc_helpers +from google.api_core import gapic_v1, grpc_helpers, operations_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore import grpc # type: ignore from google.analytics.data_v1beta.types import analytics_data_api @@ -110,6 +111,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -228,6 +230,20 @@ def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service.""" return self._grpc_channel + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + @property def run_report( self, @@ -476,6 +492,194 @@ def check_compatibility( ) return self._stubs["check_compatibility"] + @property + def create_audience_export( + self, + ) -> Callable[ + [analytics_data_api.CreateAudienceExportRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create audience export method over gRPC. + + Creates an audience export for later retrieval. This method + quickly returns the audience export's resource name and + initiates a long running asynchronous request to form an + audience export. To export the users in an audience export, + first create the audience export through this method and then + send the audience resource name to the ``QueryAudienceExport`` + method. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + An audience export is a snapshot of the users currently in the + audience at the time of audience export creation. 
Creating + audience exports for one audience on different days will return + different results as users enter and exit the audience. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + Audience exports contain the users in each audience. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.CreateAudienceExportRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_audience_export" not in self._stubs: + self._stubs["create_audience_export"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/CreateAudienceExport", + request_serializer=analytics_data_api.CreateAudienceExportRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_audience_export"] + + @property + def query_audience_export( + self, + ) -> Callable[ + [analytics_data_api.QueryAudienceExportRequest], + analytics_data_api.QueryAudienceExportResponse, + ]: + r"""Return a callable for the query audience export method over gRPC. + + Retrieves an audience export of users. After creating an + audience, the users are not immediately available for exporting. + First, a request to ``CreateAudienceExport`` is necessary to + create an audience export of users, and then second, this method + is used to retrieve the users in the audience export. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.QueryAudienceExportRequest], + ~.QueryAudienceExportResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_audience_export" not in self._stubs: + self._stubs["query_audience_export"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/QueryAudienceExport", + request_serializer=analytics_data_api.QueryAudienceExportRequest.serialize, + response_deserializer=analytics_data_api.QueryAudienceExportResponse.deserialize, + ) + return self._stubs["query_audience_export"] + + @property + def get_audience_export( + self, + ) -> Callable[ + [analytics_data_api.GetAudienceExportRequest], analytics_data_api.AudienceExport + ]: + r"""Return a callable for the get audience export method over gRPC. 
+ + Gets configuration metadata about a specific audience export. + This method can be used to understand an audience export after + it has been created. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.GetAudienceExportRequest], + ~.AudienceExport]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_audience_export" not in self._stubs: + self._stubs["get_audience_export"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/GetAudienceExport", + request_serializer=analytics_data_api.GetAudienceExportRequest.serialize, + response_deserializer=analytics_data_api.AudienceExport.deserialize, + ) + return self._stubs["get_audience_export"] + + @property + def list_audience_exports( + self, + ) -> Callable[ + [analytics_data_api.ListAudienceExportsRequest], + analytics_data_api.ListAudienceExportsResponse, + ]: + r"""Return a callable for the list audience exports method over gRPC. + + Lists all audience exports for a property. This method can be + used for you to find and reuse existing audience exports rather + than creating unnecessary new audience exports. The same + audience can have multiple audience exports that represent the + export of users that were in an audience on different days. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.ListAudienceExportsRequest], + ~.ListAudienceExportsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_audience_exports" not in self._stubs: + self._stubs["list_audience_exports"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/ListAudienceExports", + request_serializer=analytics_data_api.ListAudienceExportsRequest.serialize, + response_deserializer=analytics_data_api.ListAudienceExportsResponse.deserialize, + ) + return self._stubs["list_audience_exports"] + def close(self): self.grpc_channel.close() diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc_asyncio.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc_asyncio.py index 06a8e2206fde..5ded3483f517 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc_asyncio.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/grpc_asyncio.py @@ -16,9 +16,10 @@ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union import warnings -from google.api_core import gapic_v1, grpc_helpers_async +from google.api_core import gapic_v1, grpc_helpers_async, operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore import grpc # type: ignore from grpc.experimental import aio # type: ignore @@ -156,6 +157,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -231,6 +233,22 @@ def grpc_channel(self) -> aio.Channel: # Return the channel from cache. return self._grpc_channel + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + @property def run_report( self, @@ -482,6 +500,196 @@ def check_compatibility( ) return self._stubs["check_compatibility"] + @property + def create_audience_export( + self, + ) -> Callable[ + [analytics_data_api.CreateAudienceExportRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create audience export method over gRPC. + + Creates an audience export for later retrieval. This method + quickly returns the audience export's resource name and + initiates a long running asynchronous request to form an + audience export. To export the users in an audience export, + first create the audience export through this method and then + send the audience resource name to the ``QueryAudienceExport`` + method. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + An audience export is a snapshot of the users currently in the + audience at the time of audience export creation. Creating + audience exports for one audience on different days will return + different results as users enter and exit the audience. 
+ + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + Audience exports contain the users in each audience. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.CreateAudienceExportRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_audience_export" not in self._stubs: + self._stubs["create_audience_export"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/CreateAudienceExport", + request_serializer=analytics_data_api.CreateAudienceExportRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_audience_export"] + + @property + def query_audience_export( + self, + ) -> Callable[ + [analytics_data_api.QueryAudienceExportRequest], + Awaitable[analytics_data_api.QueryAudienceExportResponse], + ]: + r"""Return a callable for the query audience export method over gRPC. + + Retrieves an audience export of users. After creating an + audience, the users are not immediately available for exporting. + First, a request to ``CreateAudienceExport`` is necessary to + create an audience export of users, and then second, this method + is used to retrieve the users in the audience export. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audiences in Google Analytics 4 allow you to segment your users + in the ways that are important to your business. To learn more, + see https://support.google.com/analytics/answer/9267572. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.QueryAudienceExportRequest], + Awaitable[~.QueryAudienceExportResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_audience_export" not in self._stubs: + self._stubs["query_audience_export"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/QueryAudienceExport", + request_serializer=analytics_data_api.QueryAudienceExportRequest.serialize, + response_deserializer=analytics_data_api.QueryAudienceExportResponse.deserialize, + ) + return self._stubs["query_audience_export"] + + @property + def get_audience_export( + self, + ) -> Callable[ + [analytics_data_api.GetAudienceExportRequest], + Awaitable[analytics_data_api.AudienceExport], + ]: + r"""Return a callable for the get audience export method over gRPC. + + Gets configuration metadata about a specific audience export. 
+ This method can be used to understand an audience export after + it has been created. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.GetAudienceExportRequest], + Awaitable[~.AudienceExport]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_audience_export" not in self._stubs: + self._stubs["get_audience_export"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/GetAudienceExport", + request_serializer=analytics_data_api.GetAudienceExportRequest.serialize, + response_deserializer=analytics_data_api.AudienceExport.deserialize, + ) + return self._stubs["get_audience_export"] + + @property + def list_audience_exports( + self, + ) -> Callable[ + [analytics_data_api.ListAudienceExportsRequest], + Awaitable[analytics_data_api.ListAudienceExportsResponse], + ]: + r"""Return a callable for the list audience exports method over gRPC. + + Lists all audience exports for a property. This method can be + used for you to find and reuse existing audience exports rather + than creating unnecessary new audience exports. The same + audience can have multiple audience exports that represent the + export of users that were in an audience on different days. + + See `Creating an Audience + Export `__ + for an introduction to Audience Exports with examples. + + Audience Export APIs have some methods at alpha and other + methods at beta stability. The intention is to advance methods + to beta stability after some feedback and adoption. To give your + feedback on this API, complete the `Google Analytics Audience + Export API Feedback `__ + form. + + Returns: + Callable[[~.ListAudienceExportsRequest], + Awaitable[~.ListAudienceExportsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_audience_exports" not in self._stubs: + self._stubs["list_audience_exports"] = self.grpc_channel.unary_unary( + "/google.analytics.data.v1beta.BetaAnalyticsData/ListAudienceExports", + request_serializer=analytics_data_api.ListAudienceExportsRequest.serialize, + response_deserializer=analytics_data_api.ListAudienceExportsResponse.deserialize, + ) + return self._stubs["list_audience_exports"] + def close(self): return self.grpc_channel.close() diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/rest.py b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/rest.py index 89b5378a9527..e42ed6342419 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/rest.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/services/beta_analytics_data/transports/rest.py @@ -20,7 +20,13 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings -from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import ( + gapic_v1, + operations_v1, + path_template, + rest_helpers, + rest_streaming, +) from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore @@ -36,6 +42,8 @@ OptionalRetry = Union[retries.Retry, object] # type: ignore +from google.longrunning import operations_pb2 # type: ignore + from google.analytics.data_v1beta.types import analytics_data_api from .base import BetaAnalyticsDataTransport @@ -87,6 +95,22 @@ def post_check_compatibility(self, response): logging.log(f"Received response: {response}") return response + def pre_create_audience_export(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_audience_export(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_audience_export(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_audience_export(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_metadata(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -95,6 +119,22 @@ def post_get_metadata(self, response): logging.log(f"Received response: {response}") return response + def pre_list_audience_exports(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_audience_exports(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_query_audience_export(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_query_audience_export(self, response): + logging.log(f"Received response: {response}") + return response + def pre_run_pivot_report(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -196,6 +236,54 @@ def post_check_compatibility( """ return response + def pre_create_audience_export( + self, + request: analytics_data_api.CreateAudienceExportRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + analytics_data_api.CreateAudienceExportRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_audience_export + + Override in a 
subclass to manipulate the request or metadata + before they are sent to the BetaAnalyticsData server. + """ + return request, metadata + + def post_create_audience_export( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_audience_export + + Override in a subclass to manipulate the response + after it is returned by the BetaAnalyticsData server but before + it is returned to user code. + """ + return response + + def pre_get_audience_export( + self, + request: analytics_data_api.GetAudienceExportRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[analytics_data_api.GetAudienceExportRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_audience_export + + Override in a subclass to manipulate the request or metadata + before they are sent to the BetaAnalyticsData server. + """ + return request, metadata + + def post_get_audience_export( + self, response: analytics_data_api.AudienceExport + ) -> analytics_data_api.AudienceExport: + """Post-rpc interceptor for get_audience_export + + Override in a subclass to manipulate the response + after it is returned by the BetaAnalyticsData server but before + it is returned to user code. + """ + return response + def pre_get_metadata( self, request: analytics_data_api.GetMetadataRequest, @@ -219,6 +307,56 @@ def post_get_metadata( """ return response + def pre_list_audience_exports( + self, + request: analytics_data_api.ListAudienceExportsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + analytics_data_api.ListAudienceExportsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_audience_exports + + Override in a subclass to manipulate the request or metadata + before they are sent to the BetaAnalyticsData server. + """ + return request, metadata + + def post_list_audience_exports( + self, response: analytics_data_api.ListAudienceExportsResponse + ) -> analytics_data_api.ListAudienceExportsResponse: + """Post-rpc interceptor for list_audience_exports + + Override in a subclass to manipulate the response + after it is returned by the BetaAnalyticsData server but before + it is returned to user code. + """ + return response + + def pre_query_audience_export( + self, + request: analytics_data_api.QueryAudienceExportRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + analytics_data_api.QueryAudienceExportRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for query_audience_export + + Override in a subclass to manipulate the request or metadata + before they are sent to the BetaAnalyticsData server. + """ + return request, metadata + + def post_query_audience_export( + self, response: analytics_data_api.QueryAudienceExportResponse + ) -> analytics_data_api.QueryAudienceExportResponse: + """Post-rpc interceptor for query_audience_export + + Override in a subclass to manipulate the response + after it is returned by the BetaAnalyticsData server but before + it is returned to user code. 
+ """ + return response + def pre_run_pivot_report( self, request: analytics_data_api.RunPivotReportRequest, @@ -380,11 +518,39 @@ def __init__( self._session = AuthorizedSession( self._credentials, default_host=self.DEFAULT_HOST ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None if client_cert_source_for_mtls: self._session.configure_mtls_channel(client_cert_source_for_mtls) self._interceptor = interceptor or BetaAnalyticsDataRestInterceptor() self._prep_wrapped_messages(client_info) + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. + if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = {} + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. + return self._operations_client + class _BatchRunPivotReports(BetaAnalyticsDataRestStub): def __hash__(self): return hash("BatchRunPivotReports") @@ -656,6 +822,200 @@ def __call__( resp = self._interceptor.post_check_compatibility(resp) return resp + class _CreateAudienceExport(BetaAnalyticsDataRestStub): + def __hash__(self): + return hash("CreateAudienceExport") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: analytics_data_api.CreateAudienceExportRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create audience export method over HTTP. + + Args: + request (~.analytics_data_api.CreateAudienceExportRequest): + The request object. A request to create a new audience + export. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=properties/*}/audienceExports", + "body": "audience_export", + }, + ] + request, metadata = self._interceptor.pre_create_audience_export( + request, metadata + ) + pb_request = analytics_data_api.CreateAudienceExportRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_audience_export(resp) + return resp + + class _GetAudienceExport(BetaAnalyticsDataRestStub): + def __hash__(self): + return hash("GetAudienceExport") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: analytics_data_api.GetAudienceExportRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.AudienceExport: + r"""Call the get audience export method over HTTP. + + Args: + request (~.analytics_data_api.GetAudienceExportRequest): + The request object. A request to retrieve configuration + metadata about a specific audience + export. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.analytics_data_api.AudienceExport: + An audience export is a list of users + in an audience at the time of the list's + creation. One audience may have multiple + audience exports created for different + days. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=properties/*/audienceExports/*}", + }, + ] + request, metadata = self._interceptor.pre_get_audience_export( + request, metadata + ) + pb_request = analytics_data_api.GetAudienceExportRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = analytics_data_api.AudienceExport() + pb_resp = analytics_data_api.AudienceExport.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_audience_export(resp) + return resp + class _GetMetadata(BetaAnalyticsDataRestStub): def __hash__(self): return hash("GetMetadata") @@ -692,8 +1052,9 @@ def __call__( Returns: ~.analytics_data_api.Metadata: - The dimensions and metrics currently - accepted in reporting methods. + The dimensions, metrics and + comparisons currently accepted in + reporting methods. """ @@ -745,6 +1106,197 @@ def __call__( resp = self._interceptor.post_get_metadata(resp) return resp + class _ListAudienceExports(BetaAnalyticsDataRestStub): + def __hash__(self): + return hash("ListAudienceExports") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: analytics_data_api.ListAudienceExportsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.ListAudienceExportsResponse: + r"""Call the list audience exports method over HTTP. + + Args: + request (~.analytics_data_api.ListAudienceExportsRequest): + The request object. A request to list all audience + exports for a property. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.analytics_data_api.ListAudienceExportsResponse: + A list of all audience exports for a + property. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{parent=properties/*}/audienceExports", + }, + ] + request, metadata = self._interceptor.pre_list_audience_exports( + request, metadata + ) + pb_request = analytics_data_api.ListAudienceExportsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = analytics_data_api.ListAudienceExportsResponse() + pb_resp = analytics_data_api.ListAudienceExportsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_audience_exports(resp) + return resp + + class _QueryAudienceExport(BetaAnalyticsDataRestStub): + def __hash__(self): + return hash("QueryAudienceExport") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: analytics_data_api.QueryAudienceExportRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> analytics_data_api.QueryAudienceExportResponse: + r"""Call the query audience export method over HTTP. + + Args: + request (~.analytics_data_api.QueryAudienceExportRequest): + The request object. A request to list users in an + audience export. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.analytics_data_api.QueryAudienceExportResponse: + A list of users in an audience + export. 
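At the client level, the page_size/page_token handling shown in this stub is wrapped by the generated pager, which re-issues the GET with each returned page token. A minimal usage sketch, using a hypothetical property name:

# A minimal sketch; "properties/1234" is a placeholder property name.
from google.analytics import data_v1beta

client = data_v1beta.BetaAnalyticsDataClient()

pager = client.list_audience_exports(
    request=data_v1beta.ListAudienceExportsRequest(parent="properties/1234", page_size=50)
)

# Iterating pager.pages yields one ListAudienceExportsResponse per underlying request.
for page in pager.pages:
    for audience_export in page.audience_exports:
        print(audience_export.name, audience_export.state)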
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{name=properties/*/audienceExports/*}:query", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_query_audience_export( + request, metadata + ) + pb_request = analytics_data_api.QueryAudienceExportRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = analytics_data_api.QueryAudienceExportResponse() + pb_resp = analytics_data_api.QueryAudienceExportResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_query_audience_export(resp) + return resp + class _RunPivotReport(BetaAnalyticsDataRestStub): def __hash__(self): return hash("RunPivotReport") @@ -1042,6 +1594,26 @@ def check_compatibility( # In C++ this would require a dynamic_cast return self._CheckCompatibility(self._session, self._host, self._interceptor) # type: ignore + @property + def create_audience_export( + self, + ) -> Callable[ + [analytics_data_api.CreateAudienceExportRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateAudienceExport(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_audience_export( + self, + ) -> Callable[ + [analytics_data_api.GetAudienceExportRequest], analytics_data_api.AudienceExport + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetAudienceExport(self._session, self._host, self._interceptor) # type: ignore + @property def get_metadata( self, @@ -1050,6 +1622,28 @@ def get_metadata( # In C++ this would require a dynamic_cast return self._GetMetadata(self._session, self._host, self._interceptor) # type: ignore + @property + def list_audience_exports( + self, + ) -> Callable[ + [analytics_data_api.ListAudienceExportsRequest], + analytics_data_api.ListAudienceExportsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListAudienceExports(self._session, self._host, self._interceptor) # type: ignore + + @property + def query_audience_export( + self, + ) -> Callable[ + [analytics_data_api.QueryAudienceExportRequest], + analytics_data_api.QueryAudienceExportResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._QueryAudienceExport(self._session, self._host, self._interceptor) # type: ignore + @property def run_pivot_report( self, diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/types/__init__.py b/packages/google-analytics-data/google/analytics/data_v1beta/types/__init__.py index f2eea48d30e7..3a88595a3843 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/types/__init__.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/types/__init__.py @@ -14,14 +14,25 @@ # limitations under the License. # from .analytics_data_api import ( + AudienceDimension, + AudienceDimensionValue, + AudienceExport, + AudienceExportMetadata, + AudienceRow, BatchRunPivotReportsRequest, BatchRunPivotReportsResponse, BatchRunReportsRequest, BatchRunReportsResponse, CheckCompatibilityRequest, CheckCompatibilityResponse, + CreateAudienceExportRequest, + GetAudienceExportRequest, GetMetadataRequest, + ListAudienceExportsRequest, + ListAudienceExportsResponse, Metadata, + QueryAudienceExportRequest, + QueryAudienceExportResponse, RunPivotReportRequest, RunPivotReportResponse, RunRealtimeReportRequest, @@ -63,17 +74,29 @@ ResponseMetaData, RestrictedMetricType, Row, + SamplingMetadata, ) __all__ = ( + "AudienceDimension", + "AudienceDimensionValue", + "AudienceExport", + "AudienceExportMetadata", + "AudienceRow", "BatchRunPivotReportsRequest", "BatchRunPivotReportsResponse", "BatchRunReportsRequest", "BatchRunReportsResponse", "CheckCompatibilityRequest", "CheckCompatibilityResponse", + "CreateAudienceExportRequest", + "GetAudienceExportRequest", "GetMetadataRequest", + "ListAudienceExportsRequest", + "ListAudienceExportsResponse", "Metadata", + "QueryAudienceExportRequest", + "QueryAudienceExportResponse", "RunPivotReportRequest", "RunPivotReportResponse", "RunRealtimeReportRequest", @@ -109,6 +132,7 @@ "QuotaStatus", "ResponseMetaData", "Row", + "SamplingMetadata", "Compatibility", "MetricAggregation", "MetricType", diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/types/analytics_data_api.py b/packages/google-analytics-data/google/analytics/data_v1beta/types/analytics_data_api.py index 579c0d76e143..d46fc85fa256 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/types/analytics_data_api.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/types/analytics_data_api.py @@ -17,6 +17,7 @@ from typing import MutableMapping, MutableSequence +from google.protobuf import timestamp_pb2 # type: ignore import proto # type: ignore from google.analytics.data_v1beta.types import data @@ -38,6 +39,17 @@ "GetMetadataRequest", "RunRealtimeReportRequest", "RunRealtimeReportResponse", + "GetAudienceExportRequest", + "ListAudienceExportsRequest", + "ListAudienceExportsResponse", + "CreateAudienceExportRequest", + "AudienceExport", + "AudienceExportMetadata", + "QueryAudienceExportRequest", + "QueryAudienceExportResponse", + "AudienceRow", + "AudienceDimension", + "AudienceDimensionValue", }, ) @@ -135,8 +147,8 @@ class 
CheckCompatibilityResponse(proto.Message): class Metadata(proto.Message): - r"""The dimensions and metrics currently accepted in reporting - methods. + r"""The dimensions, metrics and comparisons currently accepted in + reporting methods. Attributes: name (str): @@ -991,4 +1003,409 @@ class RunRealtimeReportResponse(proto.Message): ) +class GetAudienceExportRequest(proto.Message): + r"""A request to retrieve configuration metadata about a specific + audience export. + + Attributes: + name (str): + Required. The audience export resource name. Format: + ``properties/{property}/audienceExports/{audience_export}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListAudienceExportsRequest(proto.Message): + r"""A request to list all audience exports for a property. + + Attributes: + parent (str): + Required. All audience exports for this property will be + listed in the response. Format: ``properties/{property}`` + page_size (int): + Optional. The maximum number of audience + exports to return. The service may return fewer + than this value. If unspecified, at most 200 + audience exports will be returned. The maximum + value is 1000 (higher values will be coerced to + the maximum). + page_token (str): + Optional. A page token, received from a previous + ``ListAudienceExports`` call. Provide this to retrieve the + subsequent page. + + When paginating, all other parameters provided to + ``ListAudienceExports`` must match the call that provided + the page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListAudienceExportsResponse(proto.Message): + r"""A list of all audience exports for a property. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + audience_exports (MutableSequence[google.analytics.data_v1beta.types.AudienceExport]): + Each audience export for a property. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + + This field is a member of `oneof`_ ``_next_page_token``. + """ + + @property + def raw_page(self): + return self + + audience_exports: MutableSequence["AudienceExport"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="AudienceExport", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class CreateAudienceExportRequest(proto.Message): + r"""A request to create a new audience export. + + Attributes: + parent (str): + Required. The parent resource where this audience export + will be created. Format: ``properties/{property}`` + audience_export (google.analytics.data_v1beta.types.AudienceExport): + Required. The audience export to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + audience_export: "AudienceExport" = proto.Field( + proto.MESSAGE, + number=2, + message="AudienceExport", + ) + + +class AudienceExport(proto.Message): + r"""An audience export is a list of users in an audience at the + time of the list's creation. One audience may have multiple + audience exports created for different days. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Output only. Identifier. 
The audience export resource name + assigned during creation. This resource name identifies this + ``AudienceExport``. + + Format: + ``properties/{property}/audienceExports/{audience_export}`` + audience (str): + Required. The audience resource name. This resource name + identifies the audience being listed and is shared between + the Analytics Data & Admin APIs. + + Format: ``properties/{property}/audiences/{audience}`` + audience_display_name (str): + Output only. The descriptive display name for + this audience. For example, "Purchasers". + dimensions (MutableSequence[google.analytics.data_v1beta.types.AudienceDimension]): + Required. The dimensions requested and + displayed in the query response. + state (google.analytics.data_v1beta.types.AudienceExport.State): + Output only. The current state for this + AudienceExport. + + This field is a member of `oneof`_ ``_state``. + begin_creating_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when CreateAudienceExport was called + and the AudienceExport began the ``CREATING`` state. + + This field is a member of `oneof`_ ``_begin_creating_time``. + creation_quota_tokens_charged (int): + Output only. The total quota tokens charged during creation + of the AudienceExport. Because this token count is based on + activity from the ``CREATING`` state, this tokens charged + will be fixed once an AudienceExport enters the ``ACTIVE`` + or ``FAILED`` states. + row_count (int): + Output only. The total number of rows in the + AudienceExport result. + + This field is a member of `oneof`_ ``_row_count``. + error_message (str): + Output only. Error message is populated when + an audience export fails during creation. A + common reason for such a failure is quota + exhaustion. + + This field is a member of `oneof`_ ``_error_message``. + percentage_completed (float): + Output only. The percentage completed for + this audience export ranging between 0 to 100. + + This field is a member of `oneof`_ ``_percentage_completed``. + """ + + class State(proto.Enum): + r"""The AudienceExport currently exists in this state. + + Values: + STATE_UNSPECIFIED (0): + Unspecified state will never be used. + CREATING (1): + The AudienceExport is currently creating and + will be available in the future. Creating occurs + immediately after the CreateAudienceExport call. + ACTIVE (2): + The AudienceExport is fully created and ready + for querying. An AudienceExport is updated to + active asynchronously from a request; this + occurs some time (for example 15 minutes) after + the initial create call. + FAILED (3): + The AudienceExport failed to be created. It + is possible that re-requesting this audience + export will succeed. 
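Because an export only becomes queryable once it reaches ACTIVE, and activation happens asynchronously some time after the create call, a caller will typically poll GetAudienceExport until the export leaves CREATING. A minimal sketch, using a hypothetical export resource name:

# A minimal sketch; the resource name below is a placeholder, not a real export.
import time

from google.analytics import data_v1beta

client = data_v1beta.BetaAnalyticsDataClient()
name = "properties/1234/audienceExports/5678"  # hypothetical resource name

export = client.get_audience_export(name=name)
while export.state == data_v1beta.AudienceExport.State.CREATING:
    # Per the docstring, activation can take some time (for example, 15 minutes).
    time.sleep(60)
    export = client.get_audience_export(name=name)

if export.state == data_v1beta.AudienceExport.State.ACTIVE:
    print(f"Ready to query: {export.row_count} rows")
else:
    # FAILED: re-requesting the audience export may succeed, as noted above.
    print(f"Export failed: {export.error_message}")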
+ """ + STATE_UNSPECIFIED = 0 + CREATING = 1 + ACTIVE = 2 + FAILED = 3 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + audience: str = proto.Field( + proto.STRING, + number=2, + ) + audience_display_name: str = proto.Field( + proto.STRING, + number=3, + ) + dimensions: MutableSequence["AudienceDimension"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="AudienceDimension", + ) + state: State = proto.Field( + proto.ENUM, + number=5, + optional=True, + enum=State, + ) + begin_creating_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + optional=True, + message=timestamp_pb2.Timestamp, + ) + creation_quota_tokens_charged: int = proto.Field( + proto.INT32, + number=7, + ) + row_count: int = proto.Field( + proto.INT32, + number=8, + optional=True, + ) + error_message: str = proto.Field( + proto.STRING, + number=9, + optional=True, + ) + percentage_completed: float = proto.Field( + proto.DOUBLE, + number=10, + optional=True, + ) + + +class AudienceExportMetadata(proto.Message): + r"""This metadata is currently blank.""" + + +class QueryAudienceExportRequest(proto.Message): + r"""A request to list users in an audience export. + + Attributes: + name (str): + Required. The name of the audience export to retrieve users + from. Format: + ``properties/{property}/audienceExports/{audience_export}`` + offset (int): + Optional. The row count of the start row. The first row is + counted as row 0. + + When paging, the first request does not specify offset; or + equivalently, sets offset to 0; the first request returns + the first ``limit`` of rows. The second request sets offset + to the ``limit`` of the first request; the second request + returns the second ``limit`` of rows. + + To learn more about this pagination parameter, see + `Pagination `__. + limit (int): + Optional. The number of rows to return. If unspecified, + 10,000 rows are returned. The API returns a maximum of + 250,000 rows per request, no matter how many you ask for. + ``limit`` must be positive. + + The API can also return fewer rows than the requested + ``limit``, if there aren't as many dimension values as the + ``limit``. + + To learn more about this pagination parameter, see + `Pagination `__. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + offset: int = proto.Field( + proto.INT64, + number=2, + ) + limit: int = proto.Field( + proto.INT64, + number=3, + ) + + +class QueryAudienceExportResponse(proto.Message): + r"""A list of users in an audience export. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + audience_export (google.analytics.data_v1beta.types.AudienceExport): + Configuration data about AudienceExport being + queried. Returned to help interpret the audience + rows in this response. For example, the + dimensions in this AudienceExport correspond to + the columns in the AudienceRows. + + This field is a member of `oneof`_ ``_audience_export``. + audience_rows (MutableSequence[google.analytics.data_v1beta.types.AudienceRow]): + Rows for each user in an audience export. The + number of rows in this response will be less + than or equal to request's page size. + row_count (int): + The total number of rows in the AudienceExport result. + ``rowCount`` is independent of the number of rows returned + in the response, the ``limit`` request parameter, and the + ``offset`` request parameter. 
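The offset/limit scheme described in QueryAudienceExportRequest above pages through an export by advancing offset by limit until the total row count is exhausted. A minimal sketch, assuming a hypothetical export resource name:

# A minimal sketch of offset/limit paging; the resource name is a placeholder.
from google.analytics import data_v1beta

client = data_v1beta.BetaAnalyticsDataClient()
name = "properties/1234/audienceExports/5678"  # hypothetical resource name
limit = 10_000

offset = 0
while True:
    response = client.query_audience_export(
        request=data_v1beta.QueryAudienceExportRequest(name=name, offset=offset, limit=limit)
    )
    for row in response.audience_rows:
        # One dimension value per requested dimension column.
        print([value.value for value in row.dimension_values])
    offset += limit
    # row_count is the total across all pages, independent of offset and limit.
    if offset >= (response.row_count or 0):
        break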
For example if a query returns + 175 rows and includes ``limit`` of 50 in the API request, + the response will contain ``rowCount`` of 175 but only 50 + rows. + + To learn more about this pagination parameter, see + `Pagination `__. + + This field is a member of `oneof`_ ``_row_count``. + """ + + audience_export: "AudienceExport" = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message="AudienceExport", + ) + audience_rows: MutableSequence["AudienceRow"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="AudienceRow", + ) + row_count: int = proto.Field( + proto.INT32, + number=3, + optional=True, + ) + + +class AudienceRow(proto.Message): + r"""Dimension value attributes for the audience user row. + + Attributes: + dimension_values (MutableSequence[google.analytics.data_v1beta.types.AudienceDimensionValue]): + Each dimension value attribute for an + audience user. One dimension value will be added + for each dimension column requested. + """ + + dimension_values: MutableSequence["AudienceDimensionValue"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="AudienceDimensionValue", + ) + + +class AudienceDimension(proto.Message): + r"""An audience dimension is a user attribute. Specific user attributed + are requested and then later returned in the + ``QueryAudienceExportResponse``. + + Attributes: + dimension_name (str): + Optional. The API name of the dimension. See the `API + Dimensions `__ + for the list of dimension names. + """ + + dimension_name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class AudienceDimensionValue(proto.Message): + r"""The value of a dimension. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (str): + Value as a string if the dimension type is a + string. + + This field is a member of `oneof`_ ``one_value``. + """ + + value: str = proto.Field( + proto.STRING, + number=1, + oneof="one_value", + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/types/data.py b/packages/google-analytics-data/google/analytics/data_v1beta/types/data.py index 121582aa9dea..037fed97904e 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/types/data.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/types/data.py @@ -41,6 +41,7 @@ "CohortsRange", "CohortReportSettings", "ResponseMetaData", + "SamplingMetadata", "DimensionHeader", "MetricHeader", "PivotHeader", @@ -284,7 +285,15 @@ class Dimension(proto.Message): name (str): The name of the dimension. See the `API Dimensions `__ - for the list of dimension names. + for the list of dimension names supported by core reporting + methods such as ``runReport`` and ``batchRunReports``. See + `Realtime + Dimensions `__ + for the list of dimension names supported by the + ``runRealtimeReport`` method. See `Funnel + Dimensions `__ + for the list of dimension names supported by the + ``runFunnelReport`` method. If ``dimensionExpression`` is specified, ``name`` can be any string that you would like within the allowed character set. @@ -418,7 +427,15 @@ class Metric(proto.Message): name (str): The name of the metric. See the `API Metrics `__ - for the list of metric names. + for the list of metric names supported by core reporting + methods such as ``runReport`` and ``batchRunReports``. See + `Realtime + Metrics `__ + for the list of metric names supported by the + ``runRealtimeReport`` method. 
See `Funnel + Metrics `__ + for the list of metric names supported by the + ``runFunnelReport`` method. If ``expression`` is specified, ``name`` can be any string that you would like within the allowed character set. For @@ -1255,11 +1272,19 @@ class ResponseMetaData(proto.Message): subject to thresholding thresholding and no data is absent from the report, and this happens when all data is above the thresholds. To learn more, see `Data - thresholds `__ - and `About Demographics and - Interests `__. + thresholds `__. This field is a member of `oneof`_ ``_subject_to_thresholding``. + sampling_metadatas (MutableSequence[google.analytics.data_v1beta.types.SamplingMetadata]): + If this report results is + `sampled `__, + this describes the percentage of events used in this report. + One ``samplingMetadatas`` is populated for each date range. + Each ``samplingMetadatas`` corresponds to a date range in + order that date ranges were specified in the request. + + However if the results are not sampled, this field will not + be defined. """ class SchemaRestrictionResponse(proto.Message): @@ -1341,6 +1366,46 @@ class ActiveMetricRestriction(proto.Message): number=8, optional=True, ) + sampling_metadatas: MutableSequence["SamplingMetadata"] = proto.RepeatedField( + proto.MESSAGE, + number=9, + message="SamplingMetadata", + ) + + +class SamplingMetadata(proto.Message): + r"""If this report results is + `sampled `__, + this describes the percentage of events used in this report. + Sampling is the practice of analyzing a subset of all data in order + to uncover the meaningful information in the larger data set. + + Attributes: + samples_read_count (int): + The total number of events read in this + sampled report for a date range. This is the + size of the subset this property's data that was + analyzed in this report. + sampling_space_size (int): + The total number of events present in this property's data + that could have been analyzed in this report for a date + range. Sampling uncovers the meaningful information about + the larger data set, and this is the size of the larger data + set. + + To calculate the percentage of available data that was used + in this report, compute + ``samplesReadCount/samplingSpaceSize``. + """ + + samples_read_count: int = proto.Field( + proto.INT64, + number=1, + ) + sampling_space_size: int = proto.Field( + proto.INT64, + number=2, + ) class DimensionHeader(proto.Message): @@ -1640,20 +1705,28 @@ class PropertyQuota(proto.Message): class QuotaStatus(proto.Message): r"""Current state for a particular quota group. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: consumed (int): Quota consumed by this request. + + This field is a member of `oneof`_ ``_consumed``. remaining (int): Quota remaining after this request. + + This field is a member of `oneof`_ ``_remaining``. 
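The samplesReadCount/samplingSpaceSize ratio described in SamplingMetadata above can be turned into a per-date-range sampling percentage for any report response. A minimal sketch, assuming a hypothetical property ID and a standard RunReport call:

# A minimal sketch; "properties/1234" is a placeholder property name.
from google.analytics import data_v1beta

client = data_v1beta.BetaAnalyticsDataClient()
response = client.run_report(
    request=data_v1beta.RunReportRequest(
        property="properties/1234",
        dimensions=[data_v1beta.Dimension(name="country")],
        metrics=[data_v1beta.Metric(name="activeUsers")],
        date_ranges=[data_v1beta.DateRange(start_date="2023-01-01", end_date="2023-01-31")],
    )
)

# One SamplingMetadata entry per requested date range; the list is empty
# when the report is not sampled.
for index, sampling in enumerate(response.metadata.sampling_metadatas):
    percentage = 100 * sampling.samples_read_count / sampling.sampling_space_size
    print(f"date range {index}: report based on {percentage:.1f}% of available events")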
""" consumed: int = proto.Field( proto.INT32, number=1, + optional=True, ) remaining: int = proto.Field( proto.INT32, number=2, + optional=True, ) diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_async.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_async.py new file mode 100644 index 000000000000..ddbdd1fb56ad --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAudienceExport +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_CreateAudienceExport_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +async def sample_create_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + audience_export = data_v1beta.AudienceExport() + audience_export.audience = "audience_value" + + request = data_v1beta.CreateAudienceExportRequest( + parent="parent_value", + audience_export=audience_export, + ) + + # Make the request + operation = client.create_audience_export(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_CreateAudienceExport_async] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_sync.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_sync.py new file mode 100644 index 000000000000..a96fea76d9ae --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAudienceExport +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_CreateAudienceExport_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +def sample_create_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + audience_export = data_v1beta.AudienceExport() + audience_export.audience = "audience_value" + + request = data_v1beta.CreateAudienceExportRequest( + parent="parent_value", + audience_export=audience_export, + ) + + # Make the request + operation = client.create_audience_export(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_CreateAudienceExport_sync] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_async.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_async.py new file mode 100644 index 000000000000..e1bedcc23940 --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAudienceExport +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_GetAudienceExport_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +async def sample_get_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + request = data_v1beta.GetAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = await client.get_audience_export(request=request) + + # Handle the response + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_GetAudienceExport_async] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_sync.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_sync.py new file mode 100644 index 000000000000..071d06c88baa --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAudienceExport +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_GetAudienceExport_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +def sample_get_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + request = data_v1beta.GetAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = client.get_audience_export(request=request) + + # Handle the response + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_GetAudienceExport_sync] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_async.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_async.py new file mode 100644 index 000000000000..81df8332ac2e --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAudienceExports +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_ListAudienceExports_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +async def sample_list_audience_exports(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + request = data_v1beta.ListAudienceExportsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_audience_exports(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_ListAudienceExports_async] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_sync.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_sync.py new file mode 100644 index 000000000000..aef6ea5af3a9 --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAudienceExports +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_ListAudienceExports_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +def sample_list_audience_exports(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + request = data_v1beta.ListAudienceExportsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_audience_exports(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_ListAudienceExports_sync] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_async.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_async.py new file mode 100644 index 000000000000..d33405429694 --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryAudienceExport +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_QueryAudienceExport_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +async def sample_query_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataAsyncClient() + + # Initialize request argument(s) + request = data_v1beta.QueryAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = await client.query_audience_export(request=request) + + # Handle the response + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_QueryAudienceExport_async] diff --git a/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_sync.py b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_sync.py new file mode 100644 index 000000000000..cafc44c01402 --- /dev/null +++ b/packages/google-analytics-data/samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryAudienceExport +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-analytics-data + + +# [START analyticsdata_v1beta_generated_BetaAnalyticsData_QueryAudienceExport_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.analytics import data_v1beta + + +def sample_query_audience_export(): + # Create a client + client = data_v1beta.BetaAnalyticsDataClient() + + # Initialize request argument(s) + request = data_v1beta.QueryAudienceExportRequest( + name="name_value", + ) + + # Make the request + response = client.query_audience_export(request=request) + + # Handle the response + print(response) + +# [END analyticsdata_v1beta_generated_BetaAnalyticsData_QueryAudienceExport_sync] diff --git a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json index 218723a01eaa..f35436636628 100644 --- a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json +++ b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-analytics-data", - "version": "0.18.1" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json index 306aa479d695..29e86a085403 100644 --- a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json +++ b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-analytics-data", - "version": "0.18.1" + "version": "0.1.0" }, "snippets": [ { @@ -470,6 +470,336 @@ ], "title": "analyticsdata_v1beta_generated_beta_analytics_data_check_compatibility_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient", + "shortName": "BetaAnalyticsDataAsyncClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient.create_audience_export", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.CreateAudienceExport", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "CreateAudienceExport" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.CreateAudienceExportRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "audience_export", + "type": "google.analytics.data_v1beta.types.AudienceExport" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_audience_export" + }, + "description": "Sample for CreateAudienceExport", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_CreateAudienceExport_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, 
+ "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient", + "shortName": "BetaAnalyticsDataClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient.create_audience_export", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.CreateAudienceExport", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "CreateAudienceExport" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.CreateAudienceExportRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "audience_export", + "type": "google.analytics.data_v1beta.types.AudienceExport" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_audience_export" + }, + "description": "Sample for CreateAudienceExport", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_CreateAudienceExport_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_create_audience_export_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient", + "shortName": "BetaAnalyticsDataAsyncClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient.get_audience_export", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.GetAudienceExport", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "GetAudienceExport" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.GetAudienceExportRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.analytics.data_v1beta.types.AudienceExport", + "shortName": "get_audience_export" + }, + "description": "Sample for GetAudienceExport", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"analyticsdata_v1beta_generated_BetaAnalyticsData_GetAudienceExport_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient", + "shortName": "BetaAnalyticsDataClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient.get_audience_export", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.GetAudienceExport", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "GetAudienceExport" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.GetAudienceExportRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.analytics.data_v1beta.types.AudienceExport", + "shortName": "get_audience_export" + }, + "description": "Sample for GetAudienceExport", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_GetAudienceExport_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_get_audience_export_sync.py" + }, { "canonical": true, "clientMethod": { @@ -513,10 +843,90 @@ "shortName": "get_metadata" }, "description": "Sample for GetMetadata", - "file": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_async.py", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_GetMetadata_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient", + "shortName": "BetaAnalyticsDataClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient.get_metadata", + 
"method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.GetMetadata", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "GetMetadata" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.GetMetadataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.analytics.data_v1beta.types.Metadata", + "shortName": "get_metadata" + }, + "description": "Sample for GetMetadata", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_GetMetadata_async", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_GetMetadata_sync", "segments": [ { "end": 51, @@ -549,7 +959,88 @@ "type": "RESPONSE_HANDLING" } ], - "title": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_async.py" + "title": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient", + "shortName": "BetaAnalyticsDataAsyncClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient.list_audience_exports", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.ListAudienceExports", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "ListAudienceExports" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.ListAudienceExportsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.analytics.data_v1beta.services.beta_analytics_data.pagers.ListAudienceExportsAsyncPager", + "shortName": "list_audience_exports" + }, + "description": "Sample for ListAudienceExports", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_ListAudienceExports_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_async.py" }, { "canonical": true, @@ -558,19 +1049,100 @@ "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient", "shortName": "BetaAnalyticsDataClient" }, - "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient.get_metadata", + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient.list_audience_exports", "method": { - "fullName": 
"google.analytics.data.v1beta.BetaAnalyticsData.GetMetadata", + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.ListAudienceExports", "service": { "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", "shortName": "BetaAnalyticsData" }, - "shortName": "GetMetadata" + "shortName": "ListAudienceExports" }, "parameters": [ { "name": "request", - "type": "google.analytics.data_v1beta.types.GetMetadataRequest" + "type": "google.analytics.data_v1beta.types.ListAudienceExportsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.analytics.data_v1beta.services.beta_analytics_data.pagers.ListAudienceExportsPager", + "shortName": "list_audience_exports" + }, + "description": "Sample for ListAudienceExports", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_ListAudienceExports_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_list_audience_exports_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient", + "shortName": "BetaAnalyticsDataAsyncClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataAsyncClient.query_audience_export", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.QueryAudienceExport", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "QueryAudienceExport" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.QueryAudienceExportRequest" }, { "name": "name", @@ -589,14 +1161,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.analytics.data_v1beta.types.Metadata", - "shortName": "get_metadata" + "resultType": "google.analytics.data_v1beta.types.QueryAudienceExportResponse", + "shortName": "query_audience_export" }, - "description": "Sample for GetMetadata", - "file": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_sync.py", + "description": "Sample for QueryAudienceExport", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_GetMetadata_sync", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_QueryAudienceExport_async", "segments": [ { "end": 51, @@ -629,7 +1201,87 @@ "type": "RESPONSE_HANDLING" } ], - "title": "analyticsdata_v1beta_generated_beta_analytics_data_get_metadata_sync.py" + "title": "analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.analytics.data_v1beta.BetaAnalyticsDataClient", + "shortName": "BetaAnalyticsDataClient" + }, + "fullName": "google.analytics.data_v1beta.BetaAnalyticsDataClient.query_audience_export", + "method": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData.QueryAudienceExport", + "service": { + "fullName": "google.analytics.data.v1beta.BetaAnalyticsData", + "shortName": "BetaAnalyticsData" + }, + "shortName": "QueryAudienceExport" + }, + "parameters": [ + { + "name": "request", + "type": "google.analytics.data_v1beta.types.QueryAudienceExportRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.analytics.data_v1beta.types.QueryAudienceExportResponse", + "shortName": "query_audience_export" + }, + "description": "Sample for QueryAudienceExport", + "file": "analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "analyticsdata_v1beta_generated_BetaAnalyticsData_QueryAudienceExport_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "analyticsdata_v1beta_generated_beta_analytics_data_query_audience_export_sync.py" }, { "canonical": true, diff --git a/packages/google-analytics-data/scripts/fixup_data_v1beta_keywords.py b/packages/google-analytics-data/scripts/fixup_data_v1beta_keywords.py index 7057fd59e2eb..43dd6a4d91ab 100644 --- a/packages/google-analytics-data/scripts/fixup_data_v1beta_keywords.py +++ b/packages/google-analytics-data/scripts/fixup_data_v1beta_keywords.py @@ -42,7 +42,11 @@ class dataCallTransformer(cst.CSTTransformer): 'batch_run_pivot_reports': ('property', 'requests', ), 'batch_run_reports': ('property', 'requests', ), 'check_compatibility': ('property', 'dimensions', 'metrics', 'dimension_filter', 'metric_filter', 'compatibility_filter', ), + 'create_audience_export': ('parent', 'audience_export', ), + 'get_audience_export': ('name', ), 'get_metadata': ('name', ), + 'list_audience_exports': ('parent', 'page_size', 'page_token', ), + 'query_audience_export': ('name', 'offset', 'limit', ), 'run_pivot_report': ('property', 'dimensions', 'metrics', 'date_ranges', 'pivots', 'dimension_filter', 'metric_filter', 'currency_code', 'cohort_spec', 'keep_empty_rows', 'return_property_quota', ), 'run_realtime_report': ('property', 'dimensions', 'metrics', 'dimension_filter', 'metric_filter', 'limit', 'metric_aggregations', 'order_bys', 'return_property_quota', 'minute_ranges', ), 'run_report': ('property', 'dimensions', 'metrics', 'date_ranges', 'dimension_filter', 'metric_filter', 'offset', 'limit', 'metric_aggregations', 'order_bys', 'currency_code', 'cohort_spec', 'keep_empty_rows', 'return_property_quota', ), diff --git a/packages/google-analytics-data/tests/unit/gapic/data_v1beta/test_beta_analytics_data.py b/packages/google-analytics-data/tests/unit/gapic/data_v1beta/test_beta_analytics_data.py index 122acd0ea983..65d5f5b6e3f9 100644 --- 
a/packages/google-analytics-data/tests/unit/gapic/data_v1beta/test_beta_analytics_data.py +++ b/packages/google-analytics-data/tests/unit/gapic/data_v1beta/test_beta_analytics_data.py @@ -26,14 +26,25 @@ import json import math -from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import ( + future, + gapic_v1, + grpc_helpers, + grpc_helpers_async, + operation, + operations_v1, + path_template, +) from google.api_core import client_options from google.api_core import exceptions as core_exceptions +from google.api_core import operation_async # type: ignore import google.auth from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import json_format +from google.protobuf import timestamp_pb2 # type: ignore import grpc from grpc.experimental import aio from proto.marshal.rules import wrappers @@ -45,6 +56,7 @@ from google.analytics.data_v1beta.services.beta_analytics_data import ( BetaAnalyticsDataAsyncClient, BetaAnalyticsDataClient, + pagers, transports, ) from google.analytics.data_v1beta.types import analytics_data_api, data @@ -1916,241 +1928,2115 @@ async def test_check_compatibility_field_headers_async(): @pytest.mark.parametrize( "request_type", [ - analytics_data_api.RunReportRequest, + analytics_data_api.CreateAudienceExportRequest, dict, ], ) -def test_run_report_rest(request_type): +def test_create_audience_export(request_type, transport: str = "grpc"): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = analytics_data_api.RunReportResponse( - row_count=992, - kind="kind_value", - ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = analytics_data_api.RunReportResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_audience_export(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.run_report(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.CreateAudienceExportRequest() # Establish that the response is the type that we expect. 
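+    # Descriptive note (added for clarity): CreateAudienceExport is a
+    # long-running operation. The mocked gRPC stub returns a
+    # google.longrunning Operation, which the generated client wraps in a
+    # google.api_core future, hence the future.Future assertion below.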
- assert isinstance(response, analytics_data_api.RunReportResponse) - assert response.row_count == 992 - assert response.kind == "kind_value" + assert isinstance(response, future.Future) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_run_report_rest_interceptors(null_interceptor): - transport = transports.BetaAnalyticsDataRestTransport( +def test_create_audience_export_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BetaAnalyticsDataRestInterceptor(), + transport="grpc", ) - client = BetaAnalyticsDataClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_run_report" - ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_run_report" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = analytics_data_api.RunReportRequest.pb( - analytics_data_api.RunReportRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = analytics_data_api.RunReportResponse.to_json( - analytics_data_api.RunReportResponse() - ) - - request = analytics_data_api.RunReportRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = analytics_data_api.RunReportResponse() - - client.run_report( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + client.create_audience_export() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.CreateAudienceExportRequest() -def test_run_report_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.RunReportRequest +@pytest.mark.asyncio +async def test_create_audience_export_async( + transport: str = "grpc_asyncio", + request_type=analytics_data_api.CreateAudienceExportRequest, ): - client = BetaAnalyticsDataClient( + client = BetaAnalyticsDataAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.run_report(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.CreateAudienceExportRequest() + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_run_report_rest_error(): - client = BetaAnalyticsDataClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +@pytest.mark.asyncio +async def test_create_audience_export_async_from_dict(): + await test_create_audience_export_async(request_type=dict) -@pytest.mark.parametrize( - "request_type", - [ - analytics_data_api.RunPivotReportRequest, - dict, - ], -) -def test_run_pivot_report_rest(request_type): + +def test_create_audience_export_field_headers(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} - request = request_type(**request_init) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.CreateAudienceExportRequest() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = analytics_data_api.RunPivotReportResponse( - kind="kind_value", - ) + request.parent = "parent_value" - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = analytics_data_api.RunPivotReportResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_audience_export(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.run_pivot_report(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - # Establish that the response is the type that we expect. - assert isinstance(response, analytics_data_api.RunPivotReportResponse) - assert response.kind == "kind_value" + # Establish that the field header was sent. 
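+    # Descriptive note (added for clarity): generated clients attach an
+    # "x-goog-request-params" routing header built from URI-bound request
+    # fields (here request.parent). The assertion below inspects the metadata
+    # keyword passed to the mocked stub to confirm the header was sent.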
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_run_pivot_report_rest_interceptors(null_interceptor): - transport = transports.BetaAnalyticsDataRestTransport( +@pytest.mark.asyncio +async def test_create_audience_export_field_headers_async(): + client = BetaAnalyticsDataAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BetaAnalyticsDataRestInterceptor(), ) - client = BetaAnalyticsDataClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_run_pivot_report" - ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_run_pivot_report" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = analytics_data_api.RunPivotReportRequest.pb( - analytics_data_api.RunPivotReportRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = analytics_data_api.RunPivotReportResponse.to_json( - analytics_data_api.RunPivotReportResponse() - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.CreateAudienceExportRequest() - request = analytics_data_api.RunPivotReportRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = analytics_data_api.RunPivotReportResponse() + request.parent = "parent_value" - client.run_pivot_report( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) + await client.create_audience_export(request) - pre.assert_called_once() - post.assert_called_once() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_run_pivot_report_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.RunPivotReportRequest -): + +def test_create_audience_export_flattened(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} - request = request_type(**request_init) - + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
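+        # Descriptive note (added for clarity): the flattened overload below
+        # expands the parent= and audience_export= keyword arguments into a
+        # CreateAudienceExportRequest; the test then checks the request object
+        # the mocked stub actually received.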
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_audience_export( + parent="parent_value", + audience_export=analytics_data_api.AudienceExport(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].audience_export + mock_val = analytics_data_api.AudienceExport(name="name_value") + assert arg == mock_val + + +def test_create_audience_export_flattened_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_audience_export( + analytics_data_api.CreateAudienceExportRequest(), + parent="parent_value", + audience_export=analytics_data_api.AudienceExport(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_audience_export_flattened_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_audience_export( + parent="parent_value", + audience_export=analytics_data_api.AudienceExport(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].audience_export + mock_val = analytics_data_api.AudienceExport(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_audience_export_flattened_error_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_audience_export( + analytics_data_api.CreateAudienceExportRequest(), + parent="parent_value", + audience_export=analytics_data_api.AudienceExport(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.QueryAudienceExportRequest, + dict, + ], +) +def test_query_audience_export(request_type, transport: str = "grpc"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
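+    # Descriptive note (added for clarity): QueryAudienceExport reads rows
+    # from a previously created audience export; per the keyword map added in
+    # fixup_data_v1beta_keywords.py, its request carries name, offset and
+    # limit.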
+ with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.QueryAudienceExportResponse( + row_count=992, + ) + response = client.query_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.QueryAudienceExportRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.QueryAudienceExportResponse) + assert response.row_count == 992 + + +def test_query_audience_export_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + client.query_audience_export() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.QueryAudienceExportRequest() + + +@pytest.mark.asyncio +async def test_query_audience_export_async( + transport: str = "grpc_asyncio", + request_type=analytics_data_api.QueryAudienceExportRequest, +): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.QueryAudienceExportResponse( + row_count=992, + ) + ) + response = await client.query_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.QueryAudienceExportRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.QueryAudienceExportResponse) + assert response.row_count == 992 + + +@pytest.mark.asyncio +async def test_query_audience_export_async_from_dict(): + await test_query_audience_export_async(request_type=dict) + + +def test_query_audience_export_field_headers(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.QueryAudienceExportRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + call.return_value = analytics_data_api.QueryAudienceExportResponse() + client.query_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_audience_export_field_headers_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.QueryAudienceExportRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.QueryAudienceExportResponse() + ) + await client.query_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_query_audience_export_flattened(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.QueryAudienceExportResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_audience_export( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_query_audience_export_flattened_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.query_audience_export( + analytics_data_api.QueryAudienceExportRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_query_audience_export_flattened_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.QueryAudienceExportResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.QueryAudienceExportResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_audience_export( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_query_audience_export_flattened_error_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.query_audience_export( + analytics_data_api.QueryAudienceExportRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.GetAudienceExportRequest, + dict, + ], +) +def test_get_audience_export(request_type, transport: str = "grpc"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.AudienceExport( + name="name_value", + audience="audience_value", + audience_display_name="audience_display_name_value", + state=analytics_data_api.AudienceExport.State.CREATING, + creation_quota_tokens_charged=3070, + row_count=992, + error_message="error_message_value", + percentage_completed=0.2106, + ) + response = client.get_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.GetAudienceExportRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.AudienceExport) + assert response.name == "name_value" + assert response.audience == "audience_value" + assert response.audience_display_name == "audience_display_name_value" + assert response.state == analytics_data_api.AudienceExport.State.CREATING + assert response.creation_quota_tokens_charged == 3070 + assert response.row_count == 992 + assert response.error_message == "error_message_value" + assert math.isclose(response.percentage_completed, 0.2106, rel_tol=1e-6) + + +def test_get_audience_export_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + client.get_audience_export() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.GetAudienceExportRequest() + + +@pytest.mark.asyncio +async def test_get_audience_export_async( + transport: str = "grpc_asyncio", + request_type=analytics_data_api.GetAudienceExportRequest, +): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.AudienceExport( + name="name_value", + audience="audience_value", + audience_display_name="audience_display_name_value", + state=analytics_data_api.AudienceExport.State.CREATING, + creation_quota_tokens_charged=3070, + row_count=992, + error_message="error_message_value", + percentage_completed=0.2106, + ) + ) + response = await client.get_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.GetAudienceExportRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.AudienceExport) + assert response.name == "name_value" + assert response.audience == "audience_value" + assert response.audience_display_name == "audience_display_name_value" + assert response.state == analytics_data_api.AudienceExport.State.CREATING + assert response.creation_quota_tokens_charged == 3070 + assert response.row_count == 992 + assert response.error_message == "error_message_value" + assert math.isclose(response.percentage_completed, 0.2106, rel_tol=1e-6) + + +@pytest.mark.asyncio +async def test_get_audience_export_async_from_dict(): + await test_get_audience_export_async(request_type=dict) + + +def test_get_audience_export_field_headers(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.GetAudienceExportRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + call.return_value = analytics_data_api.AudienceExport() + client.get_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_audience_export_field_headers_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.GetAudienceExportRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.AudienceExport() + ) + await client.get_audience_export(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_audience_export_flattened(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.AudienceExport() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_audience_export( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_audience_export_flattened_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_audience_export( + analytics_data_api.GetAudienceExportRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_audience_export_flattened_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_audience_export), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.AudienceExport() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.AudienceExport() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_audience_export( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_audience_export_flattened_error_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_audience_export( + analytics_data_api.GetAudienceExportRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.ListAudienceExportsRequest, + dict, + ], +) +def test_list_audience_exports(request_type, transport: str = "grpc"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
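+        # Descriptive note (added for clarity): list_audience_exports wraps
+        # the raw RPC response in a ListAudienceExportsPager; returning only
+        # next_page_token here is enough to exercise the pager surface
+        # asserted below.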
+ call.return_value = analytics_data_api.ListAudienceExportsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_audience_exports(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.ListAudienceExportsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAudienceExportsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_audience_exports_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + client.list_audience_exports() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.ListAudienceExportsRequest() + + +@pytest.mark.asyncio +async def test_list_audience_exports_async( + transport: str = "grpc_asyncio", + request_type=analytics_data_api.ListAudienceExportsRequest, +): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.ListAudienceExportsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_audience_exports(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == analytics_data_api.ListAudienceExportsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAudienceExportsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_audience_exports_async_from_dict(): + await test_list_audience_exports_async(request_type=dict) + + +def test_list_audience_exports_field_headers(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.ListAudienceExportsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + call.return_value = analytics_data_api.ListAudienceExportsResponse() + client.list_audience_exports(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_audience_exports_field_headers_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = analytics_data_api.ListAudienceExportsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.ListAudienceExportsResponse() + ) + await client.list_audience_exports(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_audience_exports_flattened(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.ListAudienceExportsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_audience_exports( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_audience_exports_flattened_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_audience_exports( + analytics_data_api.ListAudienceExportsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_audience_exports_flattened_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = analytics_data_api.ListAudienceExportsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + analytics_data_api.ListAudienceExportsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_audience_exports( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_audience_exports_flattened_error_async(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_audience_exports( + analytics_data_api.ListAudienceExportsRequest(), + parent="parent_value", + ) + + +def test_list_audience_exports_pager(transport_name: str = "grpc"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + next_page_token="abc", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[], + next_page_token="def", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + ], + next_page_token="ghi", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_audience_exports(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, analytics_data_api.AudienceExport) for i in results) + + +def test_list_audience_exports_pages(transport_name: str = "grpc"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + next_page_token="abc", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[], + next_page_token="def", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + ], + next_page_token="ghi", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + ), + RuntimeError, + ) + pages = list(client.list_audience_exports(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_audience_exports_async_pager(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
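+    # Descriptive note (added for clarity): the async pager test simulates
+    # pagination by giving the mocked call a side_effect sequence of page
+    # responses; the trailing RuntimeError is a sentinel that should never be
+    # reached, because the final page carries no next_page_token.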
+ with mock.patch.object( + type(client.transport.list_audience_exports), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + next_page_token="abc", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[], + next_page_token="def", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + ], + next_page_token="ghi", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_audience_exports( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, analytics_data_api.AudienceExport) for i in responses) + + +@pytest.mark.asyncio +async def test_list_audience_exports_async_pages(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_audience_exports), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + next_page_token="abc", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[], + next_page_token="def", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + ], + next_page_token="ghi", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_audience_exports(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.RunReportRequest, + dict, + ], +) +def test_run_report_rest(request_type): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
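+        # Descriptive note (added for clarity): REST transport tests mock the
+        # underlying requests Session; the response proto is serialized to
+        # JSON via json_format.MessageToJson, mirroring how the real transport
+        # decodes HTTP response bodies.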
+ return_value = analytics_data_api.RunReportResponse( + row_count=992, + kind="kind_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.RunReportResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.run_report(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.RunReportResponse) + assert response.row_count == 992 + assert response.kind == "kind_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_run_report_rest_interceptors(null_interceptor): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BetaAnalyticsDataRestInterceptor(), + ) + client = BetaAnalyticsDataClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_run_report" + ) as post, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "pre_run_report" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = analytics_data_api.RunReportRequest.pb( + analytics_data_api.RunReportRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = analytics_data_api.RunReportResponse.to_json( + analytics_data_api.RunReportResponse() + ) + + request = analytics_data_api.RunReportRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = analytics_data_api.RunReportResponse() + + client.run_report( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_run_report_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.RunReportRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
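+    # Descriptive note (added for clarity): a 400 status from the mocked
+    # session is surfaced by the REST transport's error mapping as
+    # core_exceptions.BadRequest, which the test expects via pytest.raises.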
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.run_report(request) + + +def test_run_report_rest_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.RunPivotReportRequest, + dict, + ], +) +def test_run_pivot_report_rest(request_type): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.RunPivotReportResponse( + kind="kind_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.RunPivotReportResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.run_pivot_report(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.RunPivotReportResponse) + assert response.kind == "kind_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_run_pivot_report_rest_interceptors(null_interceptor): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BetaAnalyticsDataRestInterceptor(), + ) + client = BetaAnalyticsDataClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_run_pivot_report" + ) as post, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "pre_run_pivot_report" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = analytics_data_api.RunPivotReportRequest.pb( + analytics_data_api.RunPivotReportRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = analytics_data_api.RunPivotReportResponse.to_json( + analytics_data_api.RunPivotReportResponse() + ) + + request = analytics_data_api.RunPivotReportRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = analytics_data_api.RunPivotReportResponse() + + client.run_pivot_report( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_run_pivot_report_rest_bad_request( + transport: str = "rest", 
request_type=analytics_data_api.RunPivotReportRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.run_pivot_report(request) + + +def test_run_pivot_report_rest_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.BatchRunReportsRequest, + dict, + ], +) +def test_batch_run_reports_rest(request_type): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.BatchRunReportsResponse( + kind="kind_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.BatchRunReportsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_run_reports(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, analytics_data_api.BatchRunReportsResponse) + assert response.kind == "kind_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_run_reports_rest_interceptors(null_interceptor): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BetaAnalyticsDataRestInterceptor(), + ) + client = BetaAnalyticsDataClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_batch_run_reports" + ) as post, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "pre_batch_run_reports" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = analytics_data_api.BatchRunReportsRequest.pb( + analytics_data_api.BatchRunReportsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = analytics_data_api.BatchRunReportsResponse.to_json( + analytics_data_api.BatchRunReportsResponse() + ) + + request = analytics_data_api.BatchRunReportsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = analytics_data_api.BatchRunReportsResponse() + + client.batch_run_reports( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_run_reports_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.BatchRunReportsRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_run_reports(request) + + +def test_batch_run_reports_rest_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.BatchRunPivotReportsRequest, + dict, + ], +) +def test_batch_run_pivot_reports_rest(request_type): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = analytics_data_api.BatchRunPivotReportsResponse( + kind="kind_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.BatchRunPivotReportsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_run_pivot_reports(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.BatchRunPivotReportsResponse) + assert response.kind == "kind_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_run_pivot_reports_rest_interceptors(null_interceptor): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BetaAnalyticsDataRestInterceptor(), + ) + client = BetaAnalyticsDataClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_batch_run_pivot_reports" + ) as post, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "pre_batch_run_pivot_reports" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = analytics_data_api.BatchRunPivotReportsRequest.pb( + analytics_data_api.BatchRunPivotReportsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + analytics_data_api.BatchRunPivotReportsResponse.to_json( + analytics_data_api.BatchRunPivotReportsResponse() + ) + ) + + request = analytics_data_api.BatchRunPivotReportsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = analytics_data_api.BatchRunPivotReportsResponse() + + client.batch_run_pivot_reports( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_run_pivot_reports_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.BatchRunPivotReportsRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_run_pivot_reports(request) + + +def test_batch_run_pivot_reports_rest_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.GetMetadataRequest, + dict, + ], +) +def test_get_metadata_rest(request_type): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "properties/sample1/metadata"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.Metadata( + name="name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.Metadata.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_metadata(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, analytics_data_api.Metadata) + assert response.name == "name_value" + + +def test_get_metadata_rest_required_fields( + request_type=analytics_data_api.GetMetadataRequest, +): + transport_class = transports.BetaAnalyticsDataRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_metadata._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_metadata._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.Metadata() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
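+        # transcode() normally maps the request onto the method's http rule
+        # (URI template, verb, body and query params); stubbing it with a
+        # fixed result keeps the focus on the query parameters that reach the
+        # session, which are compared against expected_params below.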
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = analytics_data_api.Metadata.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_metadata(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_metadata_rest_unset_required_fields(): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_metadata._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_metadata_rest_interceptors(null_interceptor): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BetaAnalyticsDataRestInterceptor(), + ) + client = BetaAnalyticsDataClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_get_metadata" + ) as post, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "pre_get_metadata" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = analytics_data_api.GetMetadataRequest.pb( + analytics_data_api.GetMetadataRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = analytics_data_api.Metadata.to_json( + analytics_data_api.Metadata() + ) + + request = analytics_data_api.GetMetadataRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = analytics_data_api.Metadata() + + client.get_metadata( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_metadata_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.GetMetadataRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "properties/sample1/metadata"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_metadata(request) + + +def test_get_metadata_rest_flattened(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.Metadata() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "properties/sample1/metadata"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.Metadata.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_metadata(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=properties/*/metadata}" % client.transport._host, args[1] + ) + + +def test_get_metadata_rest_flattened_error(transport: str = "rest"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata( + analytics_data_api.GetMetadataRequest(), + name="name_value", + ) + + +def test_get_metadata_rest_error(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + analytics_data_api.RunRealtimeReportRequest, + dict, + ], +) +def test_run_realtime_report_rest(request_type): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.RunRealtimeReportResponse( + row_count=992, + kind="kind_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.RunRealtimeReportResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.run_realtime_report(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, analytics_data_api.RunRealtimeReportResponse) + assert response.row_count == 992 + assert response.kind == "kind_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_run_realtime_report_rest_interceptors(null_interceptor): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BetaAnalyticsDataRestInterceptor(), + ) + client = BetaAnalyticsDataClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_run_realtime_report" + ) as post, mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "pre_run_realtime_report" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = analytics_data_api.RunRealtimeReportRequest.pb( + analytics_data_api.RunRealtimeReportRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + analytics_data_api.RunRealtimeReportResponse.to_json( + analytics_data_api.RunRealtimeReportResponse() + ) + ) + + request = analytics_data_api.RunRealtimeReportRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = analytics_data_api.RunRealtimeReportResponse() + + client.run_realtime_report( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_run_realtime_report_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.RunRealtimeReportRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"property": "properties/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a BadRequest error. with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest @@ -2160,10 +4046,10 @@ def test_run_pivot_report_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.run_pivot_report(request) + client.run_realtime_report(request) -def test_run_pivot_report_rest_error(): +def test_run_realtime_report_rest_error(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -2172,11 +4058,11 @@ def test_run_pivot_report_rest_error(): @pytest.mark.parametrize( "request_type", [ - analytics_data_api.BatchRunReportsRequest, + analytics_data_api.CheckCompatibilityRequest, dict, ], ) -def test_batch_run_reports_rest(request_type): +def test_check_compatibility_rest(request_type): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -2189,28 +4075,25 @@ def test_batch_run_reports_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = analytics_data_api.BatchRunReportsResponse( - kind="kind_value", - ) + return_value = analytics_data_api.CheckCompatibilityResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = analytics_data_api.BatchRunReportsResponse.pb(return_value) + return_value = analytics_data_api.CheckCompatibilityResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.batch_run_reports(request) + response = client.check_compatibility(request) # Establish that the response is the type that we expect. - assert isinstance(response, analytics_data_api.BatchRunReportsResponse) - assert response.kind == "kind_value" + assert isinstance(response, analytics_data_api.CheckCompatibilityResponse) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_batch_run_reports_rest_interceptors(null_interceptor): +def test_check_compatibility_rest_interceptors(null_interceptor): transport = transports.BetaAnalyticsDataRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -2223,14 +4106,14 @@ def test_batch_run_reports_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_batch_run_reports" + transports.BetaAnalyticsDataRestInterceptor, "post_check_compatibility" ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_batch_run_reports" + transports.BetaAnalyticsDataRestInterceptor, "pre_check_compatibility" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = analytics_data_api.BatchRunReportsRequest.pb( - analytics_data_api.BatchRunReportsRequest() + pb_message = analytics_data_api.CheckCompatibilityRequest.pb( + analytics_data_api.CheckCompatibilityRequest() ) transcode.return_value = { "method": "post", @@ -2242,19 +4125,21 @@ def test_batch_run_reports_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = analytics_data_api.BatchRunReportsResponse.to_json( - analytics_data_api.BatchRunReportsResponse() + req.return_value._content = ( + analytics_data_api.CheckCompatibilityResponse.to_json( + analytics_data_api.CheckCompatibilityResponse() + ) ) - request = analytics_data_api.BatchRunReportsRequest() + request = analytics_data_api.CheckCompatibilityRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = analytics_data_api.BatchRunReportsResponse() + post.return_value = analytics_data_api.CheckCompatibilityResponse() - client.batch_run_reports( + client.check_compatibility( request, metadata=[ ("key", "val"), @@ -2266,8 +4151,8 @@ def test_batch_run_reports_rest_interceptors(null_interceptor): post.assert_called_once() -def test_batch_run_reports_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.BatchRunReportsRequest +def test_check_compatibility_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.CheckCompatibilityRequest ): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2287,10 +4172,10 @@ def test_batch_run_reports_rest_bad_request( 
response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.batch_run_reports(request) + client.check_compatibility(request) -def test_batch_run_reports_rest_error(): +def test_check_compatibility_rest_error(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -2299,45 +4184,214 @@ def test_batch_run_reports_rest_error(): @pytest.mark.parametrize( "request_type", [ - analytics_data_api.BatchRunPivotReportsRequest, + analytics_data_api.CreateAudienceExportRequest, dict, ], ) -def test_batch_run_pivot_reports_rest(request_type): +def test_create_audience_export_rest(request_type): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} + request_init = {"parent": "properties/sample1"} + request_init["audience_export"] = { + "name": "name_value", + "audience": "audience_value", + "audience_display_name": "audience_display_name_value", + "dimensions": [{"dimension_name": "dimension_name_value"}], + "state": 1, + "begin_creating_time": {"seconds": 751, "nanos": 543}, + "creation_quota_tokens_charged": 3070, + "row_count": 992, + "error_message": "error_message_value", + "percentage_completed": 0.2106, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = analytics_data_api.CreateAudienceExportRequest.meta.fields[ + "audience_export" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
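+        # proto-plus message types expose their fields via `.meta.fields`,
+        # while raw protobuf types expose them via `.DESCRIPTOR.fields`;
+        # the branch below picks the right accessor for either case.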
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["audience_export"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["audience_export"][field])): + del request_init["audience_export"][field][i][subfield] + else: + del request_init["audience_export"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = analytics_data_api.BatchRunPivotReportsResponse( - kind="kind_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = analytics_data_api.BatchRunPivotReportsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.batch_run_pivot_reports(request) + response = client.create_audience_export(request) # Establish that the response is the type that we expect. 
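+    # CreateAudienceExport is a long-running operation: the stubbed body is a
+    # raw operations_pb2.Operation, which the client wraps in an operation
+    # future whose underlying operation name is asserted below.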
- assert isinstance(response, analytics_data_api.BatchRunPivotReportsResponse) - assert response.kind == "kind_value" + assert response.operation.name == "operations/spam" + + +def test_create_audience_export_rest_required_fields( + request_type=analytics_data_api.CreateAudienceExportRequest, +): + transport_class = transports.BetaAnalyticsDataRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_audience_export._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_audience_export._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
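+            # For this POST method the transcoded request also carries a body,
+            # so the stubbed result below sets one alongside the query params.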
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_audience_export(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_audience_export_rest_unset_required_fields(): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_audience_export._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "audienceExport", + ) + ) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_batch_run_pivot_reports_rest_interceptors(null_interceptor): +def test_create_audience_export_rest_interceptors(null_interceptor): transport = transports.BetaAnalyticsDataRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -2350,14 +4404,16 @@ def test_batch_run_pivot_reports_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_batch_run_pivot_reports" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BetaAnalyticsDataRestInterceptor, "post_create_audience_export" ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_batch_run_pivot_reports" + transports.BetaAnalyticsDataRestInterceptor, "pre_create_audience_export" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = analytics_data_api.BatchRunPivotReportsRequest.pb( - analytics_data_api.BatchRunPivotReportsRequest() + pb_message = analytics_data_api.CreateAudienceExportRequest.pb( + analytics_data_api.CreateAudienceExportRequest() ) transcode.return_value = { "method": "post", @@ -2367,23 +4423,21 @@ def test_batch_run_pivot_reports_rest_interceptors(null_interceptor): } req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - analytics_data_api.BatchRunPivotReportsResponse.to_json( - analytics_data_api.BatchRunPivotReportsResponse() - ) + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() ) - request = analytics_data_api.BatchRunPivotReportsRequest() + request = analytics_data_api.CreateAudienceExportRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = analytics_data_api.BatchRunPivotReportsResponse() + post.return_value = operations_pb2.Operation() - client.batch_run_pivot_reports( + client.create_audience_export( request, metadata=[ ("key", "val"), @@ -2395,8 +4449,8 @@ def test_batch_run_pivot_reports_rest_interceptors(null_interceptor): post.assert_called_once() -def test_batch_run_pivot_reports_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.BatchRunPivotReportsRequest +def 
test_create_audience_export_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.CreateAudienceExportRequest ): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2404,7 +4458,7 @@ def test_batch_run_pivot_reports_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} + request_init = {"parent": "properties/sample1"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -2416,10 +4470,66 @@ def test_batch_run_pivot_reports_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.batch_run_pivot_reports(request) + client.create_audience_export(request) -def test_batch_run_pivot_reports_rest_error(): +def test_create_audience_export_rest_flattened(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "properties/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + audience_export=analytics_data_api.AudienceExport(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_audience_export(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=properties/*}/audienceExports" % client.transport._host, + args[1], + ) + + +def test_create_audience_export_rest_flattened_error(transport: str = "rest"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_audience_export( + analytics_data_api.CreateAudienceExportRequest(), + parent="parent_value", + audience_export=analytics_data_api.AudienceExport(name="name_value"), + ) + + +def test_create_audience_export_rest_error(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -2428,45 +4538,45 @@ def test_batch_run_pivot_reports_rest_error(): @pytest.mark.parametrize( "request_type", [ - analytics_data_api.GetMetadataRequest, + analytics_data_api.QueryAudienceExportRequest, dict, ], ) -def test_get_metadata_rest(request_type): +def test_query_audience_export_rest(request_type): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"name": "properties/sample1/metadata"} + request_init = {"name": "properties/sample1/audienceExports/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = analytics_data_api.Metadata( - name="name_value", + return_value = analytics_data_api.QueryAudienceExportResponse( + row_count=992, ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = analytics_data_api.Metadata.pb(return_value) + return_value = analytics_data_api.QueryAudienceExportResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_metadata(request) + response = client.query_audience_export(request) # Establish that the response is the type that we expect. - assert isinstance(response, analytics_data_api.Metadata) - assert response.name == "name_value" + assert isinstance(response, analytics_data_api.QueryAudienceExportResponse) + assert response.row_count == 992 -def test_get_metadata_rest_required_fields( - request_type=analytics_data_api.GetMetadataRequest, +def test_query_audience_export_rest_required_fields( + request_type=analytics_data_api.QueryAudienceExportRequest, ): transport_class = transports.BetaAnalyticsDataRestTransport @@ -2486,7 +4596,7 @@ def test_get_metadata_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_metadata._get_unset_required_fields(jsonified_request) + ).query_audience_export._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -2495,7 +4605,7 @@ def test_get_metadata_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_metadata._get_unset_required_fields(jsonified_request) + ).query_audience_export._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -2509,7 +4619,7 @@ def test_get_metadata_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. 
- return_value = analytics_data_api.Metadata() + return_value = analytics_data_api.QueryAudienceExportResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -2521,39 +4631,42 @@ def test_get_metadata_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = analytics_data_api.Metadata.pb(return_value) + return_value = analytics_data_api.QueryAudienceExportResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_metadata(request) + response = client.query_audience_export(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_metadata_rest_unset_required_fields(): +def test_query_audience_export_rest_unset_required_fields(): transport = transports.BetaAnalyticsDataRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_metadata._get_unset_required_fields({}) + unset_fields = transport.query_audience_export._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_metadata_rest_interceptors(null_interceptor): +def test_query_audience_export_rest_interceptors(null_interceptor): transport = transports.BetaAnalyticsDataRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -2566,14 +4679,14 @@ def test_get_metadata_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_get_metadata" + transports.BetaAnalyticsDataRestInterceptor, "post_query_audience_export" ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_get_metadata" + transports.BetaAnalyticsDataRestInterceptor, "pre_query_audience_export" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = analytics_data_api.GetMetadataRequest.pb( - analytics_data_api.GetMetadataRequest() + pb_message = analytics_data_api.QueryAudienceExportRequest.pb( + analytics_data_api.QueryAudienceExportRequest() ) transcode.return_value = { "method": "post", @@ -2585,19 +4698,21 @@ def test_get_metadata_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = analytics_data_api.Metadata.to_json( - analytics_data_api.Metadata() + req.return_value._content = ( + analytics_data_api.QueryAudienceExportResponse.to_json( + analytics_data_api.QueryAudienceExportResponse() + ) ) - request = analytics_data_api.GetMetadataRequest() + request = analytics_data_api.QueryAudienceExportRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = analytics_data_api.Metadata() + post.return_value = analytics_data_api.QueryAudienceExportResponse() - 
client.get_metadata( + client.query_audience_export( request, metadata=[ ("key", "val"), @@ -2609,8 +4724,8 @@ def test_get_metadata_rest_interceptors(null_interceptor): post.assert_called_once() -def test_get_metadata_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.GetMetadataRequest +def test_query_audience_export_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.QueryAudienceExportRequest ): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2618,7 +4733,7 @@ def test_get_metadata_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"name": "properties/sample1/metadata"} + request_init = {"name": "properties/sample1/audienceExports/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -2630,10 +4745,10 @@ def test_get_metadata_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.get_metadata(request) + client.query_audience_export(request) -def test_get_metadata_rest_flattened(): +def test_query_audience_export_rest_flattened(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -2642,10 +4757,10 @@ def test_get_metadata_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = analytics_data_api.Metadata() + return_value = analytics_data_api.QueryAudienceExportResponse() # get arguments that satisfy an http rule for this method - sample_request = {"name": "properties/sample1/metadata"} + sample_request = {"name": "properties/sample1/audienceExports/sample2"} # get truthy value for each flattened field mock_args = dict( @@ -2657,23 +4772,25 @@ def test_get_metadata_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = analytics_data_api.Metadata.pb(return_value) + return_value = analytics_data_api.QueryAudienceExportResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_metadata(**mock_args) + client.query_audience_export(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1beta/{name=properties/*/metadata}" % client.transport._host, args[1] + "%s/v1beta/{name=properties/*/audienceExports/*}:query" + % client.transport._host, + args[1], ) -def test_get_metadata_rest_flattened_error(transport: str = "rest"): +def test_query_audience_export_rest_flattened_error(transport: str = "rest"): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -2682,13 +4799,13 @@ def test_get_metadata_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.get_metadata( - analytics_data_api.GetMetadataRequest(), + client.query_audience_export( + analytics_data_api.QueryAudienceExportRequest(), name="name_value", ) -def test_get_metadata_rest_error(): +def test_query_audience_export_rest_error(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -2697,47 +4814,146 @@ def test_get_metadata_rest_error(): @pytest.mark.parametrize( "request_type", [ - analytics_data_api.RunRealtimeReportRequest, + analytics_data_api.GetAudienceExportRequest, dict, ], ) -def test_run_realtime_report_rest(request_type): +def test_get_audience_export_rest(request_type): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} + request_init = {"name": "properties/sample1/audienceExports/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = analytics_data_api.RunRealtimeReportResponse( + return_value = analytics_data_api.AudienceExport( + name="name_value", + audience="audience_value", + audience_display_name="audience_display_name_value", + state=analytics_data_api.AudienceExport.State.CREATING, + creation_quota_tokens_charged=3070, row_count=992, - kind="kind_value", + error_message="error_message_value", + percentage_completed=0.2106, ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = analytics_data_api.RunRealtimeReportResponse.pb(return_value) + return_value = analytics_data_api.AudienceExport.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.run_realtime_report(request) + response = client.get_audience_export(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, analytics_data_api.RunRealtimeReportResponse) + assert isinstance(response, analytics_data_api.AudienceExport) + assert response.name == "name_value" + assert response.audience == "audience_value" + assert response.audience_display_name == "audience_display_name_value" + assert response.state == analytics_data_api.AudienceExport.State.CREATING + assert response.creation_quota_tokens_charged == 3070 assert response.row_count == 992 - assert response.kind == "kind_value" + assert response.error_message == "error_message_value" + assert math.isclose(response.percentage_completed, 0.2106, rel_tol=1e-6) + + +def test_get_audience_export_rest_required_fields( + request_type=analytics_data_api.GetAudienceExportRequest, +): + transport_class = transports.BetaAnalyticsDataRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_audience_export._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_audience_export._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.AudienceExport() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = analytics_data_api.AudienceExport.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_audience_export(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_audience_export_rest_unset_required_fields(): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_audience_export._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_run_realtime_report_rest_interceptors(null_interceptor): +def test_get_audience_export_rest_interceptors(null_interceptor): transport = transports.BetaAnalyticsDataRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -2750,14 +4966,14 @@ def test_run_realtime_report_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_run_realtime_report" + transports.BetaAnalyticsDataRestInterceptor, "post_get_audience_export" ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_run_realtime_report" + transports.BetaAnalyticsDataRestInterceptor, "pre_get_audience_export" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = analytics_data_api.RunRealtimeReportRequest.pb( - analytics_data_api.RunRealtimeReportRequest() + pb_message = analytics_data_api.GetAudienceExportRequest.pb( + analytics_data_api.GetAudienceExportRequest() ) transcode.return_value = { "method": "post", @@ -2769,57 +4985,111 @@ def test_run_realtime_report_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = ( - analytics_data_api.RunRealtimeReportResponse.to_json( - analytics_data_api.RunRealtimeReportResponse() - ) + req.return_value._content = analytics_data_api.AudienceExport.to_json( + analytics_data_api.AudienceExport() ) - request = analytics_data_api.RunRealtimeReportRequest() + request = analytics_data_api.GetAudienceExportRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = analytics_data_api.RunRealtimeReportResponse() + post.return_value = analytics_data_api.AudienceExport() + + client.get_audience_export( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_audience_export_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.GetAudienceExportRequest +): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "properties/sample1/audienceExports/sample2"} + request = 
request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_audience_export(request) + + +def test_get_audience_export_rest_flattened(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.AudienceExport() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "properties/sample1/audienceExports/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.AudienceExport.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value - client.run_realtime_report( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + client.get_audience_export(**mock_args) - pre.assert_called_once() - post.assert_called_once() + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=properties/*/audienceExports/*}" % client.transport._host, + args[1], + ) -def test_run_realtime_report_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.RunRealtimeReportRequest -): +def test_get_audience_export_rest_flattened_error(transport: str = "rest"): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.run_realtime_report(request) + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_audience_export( + analytics_data_api.GetAudienceExportRequest(), + name="name_value", + ) -def test_run_realtime_report_rest_error(): +def test_get_audience_export_rest_error(): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -2828,42 +5098,149 @@ def test_run_realtime_report_rest_error(): @pytest.mark.parametrize( "request_type", [ - analytics_data_api.CheckCompatibilityRequest, + analytics_data_api.ListAudienceExportsRequest, dict, ], ) -def test_check_compatibility_rest(request_type): +def test_list_audience_exports_rest(request_type): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} + request_init = {"parent": "properties/sample1"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = analytics_data_api.CheckCompatibilityResponse() + return_value = analytics_data_api.ListAudienceExportsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = analytics_data_api.CheckCompatibilityResponse.pb(return_value) + return_value = analytics_data_api.ListAudienceExportsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.check_compatibility(request) + response = client.list_audience_exports(request) # Establish that the response is the type that we expect. - assert isinstance(response, analytics_data_api.CheckCompatibilityResponse) + assert isinstance(response, pagers.ListAudienceExportsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_audience_exports_rest_required_fields( + request_type=analytics_data_api.ListAudienceExportsRequest, +): + transport_class = transports.BetaAnalyticsDataRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_audience_exports._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_audience_exports._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.ListAudienceExportsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = analytics_data_api.ListAudienceExportsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_audience_exports(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_audience_exports_rest_unset_required_fields(): + transport = transports.BetaAnalyticsDataRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_audience_exports._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_compatibility_rest_interceptors(null_interceptor): +def test_list_audience_exports_rest_interceptors(null_interceptor): transport = transports.BetaAnalyticsDataRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -2876,14 +5253,14 @@ def test_check_compatibility_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "post_check_compatibility" + transports.BetaAnalyticsDataRestInterceptor, "post_list_audience_exports" ) as post, mock.patch.object( - transports.BetaAnalyticsDataRestInterceptor, "pre_check_compatibility" + transports.BetaAnalyticsDataRestInterceptor, "pre_list_audience_exports" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = analytics_data_api.CheckCompatibilityRequest.pb( - analytics_data_api.CheckCompatibilityRequest() + pb_message = analytics_data_api.ListAudienceExportsRequest.pb( + analytics_data_api.ListAudienceExportsRequest() ) transcode.return_value = { "method": "post", @@ -2896,20 +5273,20 @@ def test_check_compatibility_rest_interceptors(null_interceptor): req.return_value.status_code = 200 req.return_value.request = PreparedRequest() req.return_value._content = ( - 
analytics_data_api.CheckCompatibilityResponse.to_json( - analytics_data_api.CheckCompatibilityResponse() + analytics_data_api.ListAudienceExportsResponse.to_json( + analytics_data_api.ListAudienceExportsResponse() ) ) - request = analytics_data_api.CheckCompatibilityRequest() + request = analytics_data_api.ListAudienceExportsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = analytics_data_api.CheckCompatibilityResponse() + post.return_value = analytics_data_api.ListAudienceExportsResponse() - client.check_compatibility( + client.list_audience_exports( request, metadata=[ ("key", "val"), @@ -2921,8 +5298,8 @@ def test_check_compatibility_rest_interceptors(null_interceptor): post.assert_called_once() -def test_check_compatibility_rest_bad_request( - transport: str = "rest", request_type=analytics_data_api.CheckCompatibilityRequest +def test_list_audience_exports_rest_bad_request( + transport: str = "rest", request_type=analytics_data_api.ListAudienceExportsRequest ): client = BetaAnalyticsDataClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2930,7 +5307,7 @@ def test_check_compatibility_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"property": "properties/sample1"} + request_init = {"parent": "properties/sample1"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -2942,14 +5319,127 @@ def test_check_compatibility_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.check_compatibility(request) + client.list_audience_exports(request) -def test_check_compatibility_rest_error(): +def test_list_audience_exports_rest_flattened(): client = BetaAnalyticsDataClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = analytics_data_api.ListAudienceExportsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "properties/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = analytics_data_api.ListAudienceExportsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_audience_exports(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=properties/*}/audienceExports" % client.transport._host, + args[1], + ) + + +def test_list_audience_exports_rest_flattened_error(transport: str = "rest"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_audience_exports( + analytics_data_api.ListAudienceExportsRequest(), + parent="parent_value", + ) + + +def test_list_audience_exports_rest_pager(transport: str = "rest"): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + next_page_token="abc", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[], + next_page_token="def", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + ], + next_page_token="ghi", + ), + analytics_data_api.ListAudienceExportsResponse( + audience_exports=[ + analytics_data_api.AudienceExport(), + analytics_data_api.AudienceExport(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + analytics_data_api.ListAudienceExportsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "properties/sample1"} + + pager = client.list_audience_exports(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, analytics_data_api.AudienceExport) for i in results) + + pages = list(client.list_audience_exports(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. @@ -3097,6 +5587,10 @@ def test_beta_analytics_data_base_transport(): "get_metadata", "run_realtime_report", "check_compatibility", + "create_audience_export", + "query_audience_export", + "get_audience_export", + "list_audience_exports", ) for method in methods: with pytest.raises(NotImplementedError): @@ -3105,6 +5599,11 @@ def test_beta_analytics_data_base_transport(): with pytest.raises(NotImplementedError): transport.close() + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + # Catch all for all remaining methods and properties remainder = [ "kind", @@ -3306,6 +5805,23 @@ def test_beta_analytics_data_http_transport_client_cert_source_for_mtls(): mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) +def test_beta_analytics_data_rest_lro_client(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + @pytest.mark.parametrize( "transport_name", [ @@ -3390,6 +5906,18 @@ def test_beta_analytics_data_client_transport_session_collision(transport_name): session1 = client1.transport.check_compatibility._session session2 = client2.transport.check_compatibility._session assert session1 != session2 + session1 = client1.transport.create_audience_export._session + session2 = client2.transport.create_audience_export._session + assert session1 != session2 + session1 = client1.transport.query_audience_export._session + session2 = client2.transport.query_audience_export._session + assert session1 != session2 + session1 = client1.transport.get_audience_export._session + session2 = client2.transport.get_audience_export._session + assert session1 != session2 + session1 = client1.transport.list_audience_exports._session + session2 = client2.transport.list_audience_exports._session + assert session1 != session2 def test_beta_analytics_data_grpc_transport_channel(): @@ -3518,8 +6046,65 @@ def test_beta_analytics_data_transport_channel_mtls_with_adc(transport_class): assert transport.grpc_channel == mock_grpc_channel -def test_metadata_path(): +def test_beta_analytics_data_grpc_lro_client(): + client = BetaAnalyticsDataClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_beta_analytics_data_grpc_lro_async_client(): + client = BetaAnalyticsDataAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_audience_export_path(): property = "squid" + audience_export = "clam" + expected = "properties/{property}/audienceExports/{audience_export}".format( + property=property, + audience_export=audience_export, + ) + actual = BetaAnalyticsDataClient.audience_export_path(property, audience_export) + assert expected == actual + + +def test_parse_audience_export_path(): + expected = { + "property": "whelk", + "audience_export": "octopus", + } + path = BetaAnalyticsDataClient.audience_export_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BetaAnalyticsDataClient.parse_audience_export_path(path) + assert expected == actual + + +def test_metadata_path(): + property = "oyster" expected = "properties/{property}/metadata".format( property=property, ) @@ -3529,7 +6114,7 @@ def test_metadata_path(): def test_parse_metadata_path(): expected = { - "property": "clam", + "property": "nudibranch", } path = BetaAnalyticsDataClient.metadata_path(**expected) @@ -3539,7 +6124,7 @@ def test_parse_metadata_path(): def test_common_billing_account_path(): - billing_account = "whelk" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -3549,7 +6134,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "octopus", + "billing_account": "mussel", } path = BetaAnalyticsDataClient.common_billing_account_path(**expected) @@ -3559,7 +6144,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "oyster" + folder = "winkle" expected = "folders/{folder}".format( folder=folder, ) @@ -3569,7 +6154,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nudibranch", + "folder": "nautilus", } path = BetaAnalyticsDataClient.common_folder_path(**expected) @@ -3579,7 +6164,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "cuttlefish" + organization = "scallop" expected = "organizations/{organization}".format( organization=organization, ) @@ -3589,7 +6174,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "mussel", + "organization": "abalone", } path = BetaAnalyticsDataClient.common_organization_path(**expected) @@ -3599,7 +6184,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "winkle" + project = "squid" expected = "projects/{project}".format( project=project, ) @@ -3609,7 +6194,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nautilus", + "project": "clam", } path = BetaAnalyticsDataClient.common_project_path(**expected) @@ -3619,8 +6204,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "scallop" - location = "abalone" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -3631,8 +6216,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "squid", - "location": "clam", + "project": "oyster", + "location": "nudibranch", } path = BetaAnalyticsDataClient.common_location_path(**expected) From 23d8814baa6288d94484d52a98714fd32755ada3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 9 Dec 2023 08:55:32 -0500 Subject: [PATCH 07/80] feat: [google-ai-generativelanguage] Add v1beta, adds GenerativeService and RetrievalService (#12106) - [ ] Regenerate this pull request now. 
BEGIN_COMMIT_OVERRIDE feat: Add v1beta, adds GenerativeService and RetrievalService feat: Add v1, contains only GenerativeService, nothing else feat: Set google.ai.generativelanguage_v1beta as the default import END_COMMIT_OVERRIDE PiperOrigin-RevId: 589233134 Source-Link: https://github.com/googleapis/googleapis/commit/f5b6a3ba4f2bcb2d314873c7f2fb8af185edb94a Source-Link: https://github.com/googleapis/googleapis-gen/commit/737108023d39ff445869408dd71d689b163059a8 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWFpLWdlbmVyYXRpdmVsYW5ndWFnZS8uT3dsQm90LnlhbWwiLCJoIjoiNzM3MTA4MDIzZDM5ZmY0NDU4Njk0MDhkZDcxZDY4OWIxNjMwNTlhOCJ9 --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.repo-metadata.json | 2 +- .../generative_service.rst | 6 + .../generativelanguage_v1/model_service.rst | 10 + .../docs/generativelanguage_v1/services_.rst | 7 + .../docs/generativelanguage_v1/types_.rst | 6 + .../discuss_service.rst | 6 + .../generative_service.rst | 6 + .../model_service.rst | 10 + .../permission_service.rst | 10 + .../retriever_service.rst | 10 + .../generativelanguage_v1beta/services_.rst | 11 + .../text_service.rst | 6 + .../docs/generativelanguage_v1beta/types_.rst | 6 + .../docs/index.rst | 22 +- .../google/ai/generativelanguage/__init__.py | 186 +- .../ai/generativelanguage/gapic_version.py | 2 +- .../ai/generativelanguage_v1/__init__.py | 75 + .../generativelanguage_v1/gapic_metadata.json | 152 + .../ai/generativelanguage_v1/gapic_version.py | 16 + .../google/ai/generativelanguage_v1/py.typed | 2 + .../services/__init__.py | 15 + .../services/generative_service/__init__.py | 22 + .../generative_service/async_client.py | 1069 ++ .../services/generative_service/client.py | 1252 ++ .../generative_service/transports/__init__.py | 36 + .../generative_service/transports/base.py | 299 + .../generative_service/transports/grpc.py | 443 + .../transports/grpc_asyncio.py | 443 + .../generative_service/transports/rest.py | 1179 ++ .../services/model_service/__init__.py | 22 + .../services/model_service/async_client.py | 625 + .../services/model_service/client.py | 856 ++ .../services/model_service/pagers.py | 155 + .../model_service/transports/__init__.py | 36 + .../services/model_service/transports/base.py | 199 + .../services/model_service/transports/grpc.py | 344 + .../model_service/transports/grpc_asyncio.py | 347 + .../services/model_service/transports/rest.py | 688 + .../generativelanguage_v1/types/__init__.py | 61 + .../generativelanguage_v1/types/citation.py | 101 + .../ai/generativelanguage_v1/types/content.py | 130 + .../types/generative_service.py | 597 + .../ai/generativelanguage_v1/types/model.py | 155 + .../types/model_service.py | 113 + .../ai/generativelanguage_v1/types/safety.py | 192 + .../ai/generativelanguage_v1beta/__init__.py | 303 + .../gapic_metadata.json | 798 ++ .../gapic_version.py | 16 + .../ai/generativelanguage_v1beta/py.typed | 2 + .../services/__init__.py | 15 + .../services/discuss_service/__init__.py | 22 + .../services/discuss_service/async_client.py | 564 + .../services/discuss_service/client.py | 775 + .../discuss_service/transports/__init__.py | 36 + .../discuss_service/transports/base.py | 194 + .../discuss_service/transports/grpc.py | 303 + .../transports/grpc_asyncio.py | 302 + .../discuss_service/transports/rest.py | 474 + .../services/generative_service/__init__.py | 22 + .../generative_service/async_client.py | 1070 ++ .../services/generative_service/client.py | 1244 ++ .../generative_service/transports/__init__.py | 36 + 
.../generative_service/transports/base.py | 298 + .../generative_service/transports/grpc.py | 420 + .../transports/grpc_asyncio.py | 420 + .../generative_service/transports/rest.py | 1051 ++ .../services/model_service/__init__.py | 22 + .../services/model_service/async_client.py | 1101 ++ .../services/model_service/client.py | 1282 ++ .../services/model_service/pagers.py | 283 + .../model_service/transports/__init__.py | 36 + .../services/model_service/transports/base.py | 316 + .../services/model_service/transports/grpc.py | 446 + .../model_service/transports/grpc_asyncio.py | 458 + .../services/model_service/transports/rest.py | 1082 ++ .../services/permission_service/__init__.py | 22 + .../permission_service/async_client.py | 971 ++ .../services/permission_service/client.py | 1160 ++ .../services/permission_service/pagers.py | 155 + .../permission_service/transports/__init__.py | 36 + .../permission_service/transports/base.py | 280 + .../permission_service/transports/grpc.py | 413 + .../transports/grpc_asyncio.py | 418 + .../permission_service/transports/rest.py | 1050 ++ .../services/retriever_service/__init__.py | 22 + .../retriever_service/async_client.py | 2333 +++ .../services/retriever_service/client.py | 2459 ++++ .../services/retriever_service/pagers.py | 411 + .../retriever_service/transports/__init__.py | 36 + .../retriever_service/transports/base.py | 588 + .../retriever_service/transports/grpc.py | 780 + .../transports/grpc_asyncio.py | 792 ++ .../retriever_service/transports/rest.py | 2717 ++++ .../services/text_service/__init__.py | 22 + .../services/text_service/async_client.py | 835 ++ .../services/text_service/client.py | 1030 ++ .../text_service/transports/__init__.py | 36 + .../services/text_service/transports/base.py | 245 + .../services/text_service/transports/grpc.py | 357 + .../text_service/transports/grpc_asyncio.py | 360 + .../services/text_service/transports/rest.py | 746 + .../types/__init__.py | 271 + .../types/citation.py | 101 + .../types/content.py | 449 + .../types/discuss_service.py | 356 + .../types/generative_service.py | 952 ++ .../generativelanguage_v1beta/types/model.py | 155 + .../types/model_service.py | 333 + .../types/permission.py | 141 + .../types/permission_service.py | 220 + .../types/retriever.py | 411 + .../types/retriever_service.py | 793 ++ .../generativelanguage_v1beta/types/safety.py | 271 + .../types/text_service.py | 441 + .../types/tuned_model.py | 413 + .../gapic_version.py | 2 +- .../gapic_version.py | 2 +- ...tive_service_batch_embed_contents_async.py | 56 + ...ative_service_batch_embed_contents_sync.py | 56 + ...d_generative_service_count_tokens_async.py | 52 + ...ed_generative_service_count_tokens_sync.py | 52 + ..._generative_service_embed_content_async.py | 52 + ...d_generative_service_embed_content_sync.py | 52 + ...nerative_service_generate_content_async.py | 52 + ...enerative_service_generate_content_sync.py | 52 + ...e_service_stream_generate_content_async.py | 53 + ...ve_service_stream_generate_content_sync.py | 53 + ...generated_model_service_get_model_async.py | 52 + ..._generated_model_service_get_model_sync.py | 52 + ...nerated_model_service_list_models_async.py | 52 + ...enerated_model_service_list_models_sync.py | 52 + ...cuss_service_count_message_tokens_async.py | 56 + ...scuss_service_count_message_tokens_sync.py | 56 + ..._discuss_service_generate_message_async.py | 56 + ...d_discuss_service_generate_message_sync.py | 56 + ...tive_service_batch_embed_contents_async.py | 56 + 
...ative_service_batch_embed_contents_sync.py | 56 + ...d_generative_service_count_tokens_async.py | 52 + ...ed_generative_service_count_tokens_sync.py | 52 + ..._generative_service_embed_content_async.py | 52 + ...d_generative_service_embed_content_sync.py | 52 + ...enerative_service_generate_answer_async.py | 53 + ...generative_service_generate_answer_sync.py | 53 + ...nerative_service_generate_content_async.py | 52 + ...enerative_service_generate_content_sync.py | 52 + ...e_service_stream_generate_content_async.py | 53 + ...ve_service_stream_generate_content_sync.py | 53 + ..._model_service_create_tuned_model_async.py | 60 + ...d_model_service_create_tuned_model_sync.py | 60 + ..._model_service_delete_tuned_model_async.py | 50 + ...d_model_service_delete_tuned_model_sync.py | 50 + ...generated_model_service_get_model_async.py | 52 + ..._generated_model_service_get_model_sync.py | 52 + ...ted_model_service_get_tuned_model_async.py | 52 + ...ated_model_service_get_tuned_model_sync.py | 52 + ...nerated_model_service_list_models_async.py | 52 + ...enerated_model_service_list_models_sync.py | 52 + ...d_model_service_list_tuned_models_async.py | 52 + ...ed_model_service_list_tuned_models_sync.py | 52 + ..._model_service_update_tuned_model_async.py | 56 + ...d_model_service_update_tuned_model_sync.py | 56 + ...mission_service_create_permission_async.py | 52 + ...rmission_service_create_permission_sync.py | 52 + ...mission_service_delete_permission_async.py | 50 + ...rmission_service_delete_permission_sync.py | 50 + ...permission_service_get_permission_async.py | 52 + ..._permission_service_get_permission_sync.py | 52 + ...rmission_service_list_permissions_async.py | 53 + ...ermission_service_list_permissions_sync.py | 53 + ...ission_service_transfer_ownership_async.py | 53 + ...mission_service_transfer_ownership_sync.py | 53 + ...mission_service_update_permission_async.py | 51 + ...rmission_service_update_permission_sync.py | 51 + ...iever_service_batch_create_chunks_async.py | 56 + ...riever_service_batch_create_chunks_sync.py | 56 + ...iever_service_batch_delete_chunks_async.py | 53 + ...riever_service_batch_delete_chunks_sync.py | 53 + ...iever_service_batch_update_chunks_async.py | 55 + ...riever_service_batch_update_chunks_sync.py | 55 + ...ed_retriever_service_create_chunk_async.py | 56 + ...ted_retriever_service_create_chunk_sync.py | 56 + ...d_retriever_service_create_corpus_async.py | 51 + ...ed_retriever_service_create_corpus_sync.py | 51 + ...retriever_service_create_document_async.py | 52 + ..._retriever_service_create_document_sync.py | 52 + ...ed_retriever_service_delete_chunk_async.py | 50 + ...ted_retriever_service_delete_chunk_sync.py | 50 + ...d_retriever_service_delete_corpus_async.py | 50 + ...ed_retriever_service_delete_corpus_sync.py | 50 + ...retriever_service_delete_document_async.py | 50 + ..._retriever_service_delete_document_sync.py | 50 + ...rated_retriever_service_get_chunk_async.py | 52 + ...erated_retriever_service_get_chunk_sync.py | 52 + ...ated_retriever_service_get_corpus_async.py | 52 + ...rated_retriever_service_get_corpus_sync.py | 52 + ...ed_retriever_service_get_document_async.py | 52 + ...ted_retriever_service_get_document_sync.py | 52 + ...ted_retriever_service_list_chunks_async.py | 53 + ...ated_retriever_service_list_chunks_sync.py | 53 + ...ed_retriever_service_list_corpora_async.py | 52 + ...ted_retriever_service_list_corpora_sync.py | 52 + ..._retriever_service_list_documents_async.py | 53 + ...d_retriever_service_list_documents_sync.py | 53 + 
...ed_retriever_service_query_corpus_async.py | 53 + ...ted_retriever_service_query_corpus_sync.py | 53 + ..._retriever_service_query_document_async.py | 53 + ...d_retriever_service_query_document_sync.py | 53 + ...ed_retriever_service_update_chunk_async.py | 55 + ...ted_retriever_service_update_chunk_sync.py | 55 + ...d_retriever_service_update_corpus_async.py | 51 + ...ed_retriever_service_update_corpus_sync.py | 51 + ...retriever_service_update_document_async.py | 51 + ..._retriever_service_update_document_sync.py | 51 + ...ted_text_service_batch_embed_text_async.py | 52 + ...ated_text_service_batch_embed_text_sync.py | 52 + ...ed_text_service_count_text_tokens_async.py | 56 + ...ted_text_service_count_text_tokens_sync.py | 56 + ...generated_text_service_embed_text_async.py | 52 + ..._generated_text_service_embed_text_sync.py | 52 + ...erated_text_service_generate_text_async.py | 56 + ...nerated_text_service_generate_text_sync.py | 56 + ...adata_google.ai.generativelanguage.v1.json | 1190 ++ ...a_google.ai.generativelanguage.v1beta.json | 7440 ++++++++++ ..._google.ai.generativelanguage.v1beta2.json | 2 +- ..._google.ai.generativelanguage.v1beta3.json | 2 +- .../fixup_generativelanguage_v1_keywords.py | 182 + ...ixup_generativelanguage_v1beta_keywords.py | 220 + .../gapic/generativelanguage_v1/__init__.py | 15 + .../test_generative_service.py | 4739 ++++++ .../test_model_service.py | 3204 +++++ .../generativelanguage_v1beta/__init__.py | 15 + .../test_discuss_service.py | 2553 ++++ .../test_generative_service.py | 4722 ++++++ .../test_model_service.py | 5648 ++++++++ .../test_permission_service.py | 4929 +++++++ .../test_retriever_service.py | 11851 ++++++++++++++++ .../test_text_service.py | 3544 +++++ 237 files changed, 102636 insertions(+), 26 deletions(-) create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1/generative_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1/model_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1/services_.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1/types_.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/discuss_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/generative_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/model_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/permission_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/retriever_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/services_.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/text_service.rst create mode 100644 packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/types_.rst create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_metadata.json create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/py.typed create mode 100644 
packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/rest.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/pagers.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/rest.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/citation.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/content.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/generative_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/safety.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_metadata.json create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/py.typed create mode 100644 
packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/rest.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/rest.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/pagers.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/rest.py create mode 100644 
packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/pagers.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/rest.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/pagers.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/rest.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/async_client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/client.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/base.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc_asyncio.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/rest.py create mode 100644 
packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/__init__.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/citation.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/content.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/discuss_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/generative_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/safety.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/text_service.py create mode 100644 packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/tuned_model.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_sync.py create mode 100644 
packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_sync.py create mode 
100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_sync.py create mode 100644 
packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_sync.py create mode 100644 
packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_sync.py create mode 100644 
packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_async.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_sync.py create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json create mode 100644 packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json create mode 100644 packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1_keywords.py create mode 100644 packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1beta_keywords.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/__init__.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_generative_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_model_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/__init__.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_discuss_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_generative_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_model_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_permission_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_retriever_service.py create mode 100644 packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_text_service.py diff --git a/packages/google-ai-generativelanguage/.repo-metadata.json b/packages/google-ai-generativelanguage/.repo-metadata.json index bc3075ddf66f..3ae73c9657d8 100644 --- a/packages/google-ai-generativelanguage/.repo-metadata.json +++ b/packages/google-ai-generativelanguage/.repo-metadata.json @@ -11,7 +11,7 @@ "repo": "googleapis/google-cloud-python", "distribution_name": "google-ai-generativelanguage", "api_id": "generativelanguage.googleapis.com", - "default_version": "v1beta3", + "default_version": "v1beta", "codeowner_team": "", "api_shortname": "generativelanguage" } diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1/generative_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/generative_service.rst new file mode 100644 index 000000000000..ce358b56764b --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/generative_service.rst @@ -0,0 +1,6 @@ +GenerativeService +----------------------------------- + +.. 
automodule:: google.ai.generativelanguage_v1.services.generative_service + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1/model_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/model_service.rst new file mode 100644 index 000000000000..1ec9fb6f5766 --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/model_service.rst @@ -0,0 +1,10 @@ +ModelService +------------------------------ + +.. automodule:: google.ai.generativelanguage_v1.services.model_service + :members: + :inherited-members: + +.. automodule:: google.ai.generativelanguage_v1.services.model_service.pagers + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1/services_.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/services_.rst new file mode 100644 index 000000000000..988dccd7ad40 --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/services_.rst @@ -0,0 +1,7 @@ +Services for Google Ai Generativelanguage v1 API +================================================ +.. toctree:: + :maxdepth: 2 + + generative_service + model_service diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1/types_.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/types_.rst new file mode 100644 index 000000000000..c3e9aef89c0e --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1/types_.rst @@ -0,0 +1,6 @@ +Types for Google Ai Generativelanguage v1 API +============================================= + +.. automodule:: google.ai.generativelanguage_v1.types + :members: + :show-inheritance: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/discuss_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/discuss_service.rst new file mode 100644 index 000000000000..f66ee1c4e90c --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/discuss_service.rst @@ -0,0 +1,6 @@ +DiscussService +-------------------------------- + +.. automodule:: google.ai.generativelanguage_v1beta.services.discuss_service + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/generative_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/generative_service.rst new file mode 100644 index 000000000000..1e19732947cb --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/generative_service.rst @@ -0,0 +1,6 @@ +GenerativeService +----------------------------------- + +.. automodule:: google.ai.generativelanguage_v1beta.services.generative_service + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/model_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/model_service.rst new file mode 100644 index 000000000000..f656fafbec03 --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/model_service.rst @@ -0,0 +1,10 @@ +ModelService +------------------------------ + +.. automodule:: google.ai.generativelanguage_v1beta.services.model_service + :members: + :inherited-members: + +.. 
automodule:: google.ai.generativelanguage_v1beta.services.model_service.pagers + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/permission_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/permission_service.rst new file mode 100644 index 000000000000..9c14fefd538a --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/permission_service.rst @@ -0,0 +1,10 @@ +PermissionService +----------------------------------- + +.. automodule:: google.ai.generativelanguage_v1beta.services.permission_service + :members: + :inherited-members: + +.. automodule:: google.ai.generativelanguage_v1beta.services.permission_service.pagers + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/retriever_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/retriever_service.rst new file mode 100644 index 000000000000..704e781a41fc --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/retriever_service.rst @@ -0,0 +1,10 @@ +RetrieverService +---------------------------------- + +.. automodule:: google.ai.generativelanguage_v1beta.services.retriever_service + :members: + :inherited-members: + +.. automodule:: google.ai.generativelanguage_v1beta.services.retriever_service.pagers + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/services_.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/services_.rst new file mode 100644 index 000000000000..2826bf7a6a6a --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/services_.rst @@ -0,0 +1,11 @@ +Services for Google Ai Generativelanguage v1beta API +==================================================== +.. toctree:: + :maxdepth: 2 + + discuss_service + generative_service + model_service + permission_service + retriever_service + text_service diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/text_service.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/text_service.rst new file mode 100644 index 000000000000..4b17617a0273 --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/text_service.rst @@ -0,0 +1,6 @@ +TextService +----------------------------- + +.. automodule:: google.ai.generativelanguage_v1beta.services.text_service + :members: + :inherited-members: diff --git a/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/types_.rst b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/types_.rst new file mode 100644 index 000000000000..a5ac73b8a0d5 --- /dev/null +++ b/packages/google-ai-generativelanguage/docs/generativelanguage_v1beta/types_.rst @@ -0,0 +1,6 @@ +Types for Google Ai Generativelanguage v1beta API +================================================= + +.. automodule:: google.ai.generativelanguage_v1beta.types + :members: + :show-inheritance: diff --git a/packages/google-ai-generativelanguage/docs/index.rst b/packages/google-ai-generativelanguage/docs/index.rst index 51e1b5474cd2..5688bf71543b 100644 --- a/packages/google-ai-generativelanguage/docs/index.rst +++ b/packages/google-ai-generativelanguage/docs/index.rst @@ -3,7 +3,7 @@ .. include:: multiprocessing.rst This package includes clients for multiple versions of Generative Language API. 
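The default_version bump in .repo-metadata.json above (v1beta3 to v1beta) changes which GAPIC surface the versionless package re-exports. A minimal editorial sketch of the difference, assuming the package is installed; the printed module paths are illustrative:

# Illustrative only: the versionless package follows the default surface,
# while the versioned modules stay pinned to one API version.
from google.ai import generativelanguage as glm          # default surface (v1beta after this change)
from google.ai import generativelanguage_v1 as glm_v1    # explicitly pinned v1 surface

print(glm.GenerativeServiceClient.__module__)     # ...generativelanguage_v1beta...
print(glm_v1.GenerativeServiceClient.__module__)  # ...generativelanguage_v1...
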
-By default, you will get version ``generativelanguage_v1beta3``. +By default, you will get version ``generativelanguage_v1beta``. API Reference @@ -11,8 +11,16 @@ API Reference .. toctree:: :maxdepth: 2 - generativelanguage_v1beta3/services_ - generativelanguage_v1beta3/types_ + generativelanguage_v1beta/services_ + generativelanguage_v1beta/types_ + +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + generativelanguage_v1/services_ + generativelanguage_v1/types_ API Reference ------------- @@ -22,6 +30,14 @@ API Reference generativelanguage_v1beta2/services_ generativelanguage_v1beta2/types_ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + generativelanguage_v1beta3/services_ + generativelanguage_v1beta3/types_ + Changelog --------- diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage/__init__.py index f81250e3fbf4..8201c76ab661 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage/__init__.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage/__init__.py @@ -18,35 +18,60 @@ __version__ = package_version.__version__ -from google.ai.generativelanguage_v1beta3.services.discuss_service.async_client import ( +from google.ai.generativelanguage_v1beta.services.discuss_service.async_client import ( DiscussServiceAsyncClient, ) -from google.ai.generativelanguage_v1beta3.services.discuss_service.client import ( +from google.ai.generativelanguage_v1beta.services.discuss_service.client import ( DiscussServiceClient, ) -from google.ai.generativelanguage_v1beta3.services.model_service.async_client import ( +from google.ai.generativelanguage_v1beta.services.generative_service.async_client import ( + GenerativeServiceAsyncClient, +) +from google.ai.generativelanguage_v1beta.services.generative_service.client import ( + GenerativeServiceClient, +) +from google.ai.generativelanguage_v1beta.services.model_service.async_client import ( ModelServiceAsyncClient, ) -from google.ai.generativelanguage_v1beta3.services.model_service.client import ( +from google.ai.generativelanguage_v1beta.services.model_service.client import ( ModelServiceClient, ) -from google.ai.generativelanguage_v1beta3.services.permission_service.async_client import ( +from google.ai.generativelanguage_v1beta.services.permission_service.async_client import ( PermissionServiceAsyncClient, ) -from google.ai.generativelanguage_v1beta3.services.permission_service.client import ( +from google.ai.generativelanguage_v1beta.services.permission_service.client import ( PermissionServiceClient, ) -from google.ai.generativelanguage_v1beta3.services.text_service.async_client import ( +from google.ai.generativelanguage_v1beta.services.retriever_service.async_client import ( + RetrieverServiceAsyncClient, +) +from google.ai.generativelanguage_v1beta.services.retriever_service.client import ( + RetrieverServiceClient, +) +from google.ai.generativelanguage_v1beta.services.text_service.async_client import ( TextServiceAsyncClient, ) -from google.ai.generativelanguage_v1beta3.services.text_service.client import ( +from google.ai.generativelanguage_v1beta.services.text_service.client import ( TextServiceClient, ) -from google.ai.generativelanguage_v1beta3.types.citation import ( +from google.ai.generativelanguage_v1beta.types.citation import ( CitationMetadata, CitationSource, ) -from google.ai.generativelanguage_v1beta3.types.discuss_service import ( +from 
google.ai.generativelanguage_v1beta.types.content import ( + Blob, + Content, + FunctionCall, + FunctionDeclaration, + FunctionResponse, + GroundingPassage, + GroundingPassages, + Part, + Schema, + Tool, + Type, +) +from google.ai.generativelanguage_v1beta.types.discuss_service import ( CountMessageTokensRequest, CountMessageTokensResponse, Example, @@ -55,8 +80,27 @@ Message, MessagePrompt, ) -from google.ai.generativelanguage_v1beta3.types.model import Model -from google.ai.generativelanguage_v1beta3.types.model_service import ( +from google.ai.generativelanguage_v1beta.types.generative_service import ( + AttributionSourceId, + BatchEmbedContentsRequest, + BatchEmbedContentsResponse, + Candidate, + ContentEmbedding, + CountTokensRequest, + CountTokensResponse, + EmbedContentRequest, + EmbedContentResponse, + GenerateAnswerRequest, + GenerateAnswerResponse, + GenerateContentRequest, + GenerateContentResponse, + GenerationConfig, + GroundingAttribution, + SemanticRetrieverConfig, + TaskType, +) +from google.ai.generativelanguage_v1beta.types.model import Model +from google.ai.generativelanguage_v1beta.types.model_service import ( CreateTunedModelMetadata, CreateTunedModelRequest, DeleteTunedModelRequest, @@ -68,8 +112,8 @@ ListTunedModelsResponse, UpdateTunedModelRequest, ) -from google.ai.generativelanguage_v1beta3.types.permission import Permission -from google.ai.generativelanguage_v1beta3.types.permission_service import ( +from google.ai.generativelanguage_v1beta.types.permission import Permission +from google.ai.generativelanguage_v1beta.types.permission_service import ( CreatePermissionRequest, DeletePermissionRequest, GetPermissionRequest, @@ -79,14 +123,54 @@ TransferOwnershipResponse, UpdatePermissionRequest, ) -from google.ai.generativelanguage_v1beta3.types.safety import ( +from google.ai.generativelanguage_v1beta.types.retriever import ( + Chunk, + ChunkData, + Condition, + Corpus, + CustomMetadata, + Document, + MetadataFilter, + StringList, +) +from google.ai.generativelanguage_v1beta.types.retriever_service import ( + BatchCreateChunksRequest, + BatchCreateChunksResponse, + BatchDeleteChunksRequest, + BatchUpdateChunksRequest, + BatchUpdateChunksResponse, + CreateChunkRequest, + CreateCorpusRequest, + CreateDocumentRequest, + DeleteChunkRequest, + DeleteCorpusRequest, + DeleteDocumentRequest, + GetChunkRequest, + GetCorpusRequest, + GetDocumentRequest, + ListChunksRequest, + ListChunksResponse, + ListCorporaRequest, + ListCorporaResponse, + ListDocumentsRequest, + ListDocumentsResponse, + QueryCorpusRequest, + QueryCorpusResponse, + QueryDocumentRequest, + QueryDocumentResponse, + RelevantChunk, + UpdateChunkRequest, + UpdateCorpusRequest, + UpdateDocumentRequest, +) +from google.ai.generativelanguage_v1beta.types.safety import ( ContentFilter, HarmCategory, SafetyFeedback, SafetyRating, SafetySetting, ) -from google.ai.generativelanguage_v1beta3.types.text_service import ( +from google.ai.generativelanguage_v1beta.types.text_service import ( BatchEmbedTextRequest, BatchEmbedTextResponse, CountTextTokensRequest, @@ -99,7 +183,7 @@ TextCompletion, TextPrompt, ) -from google.ai.generativelanguage_v1beta3.types.tuned_model import ( +from google.ai.generativelanguage_v1beta.types.tuned_model import ( Dataset, Hyperparameters, TunedModel, @@ -113,14 +197,29 @@ __all__ = ( "DiscussServiceClient", "DiscussServiceAsyncClient", + "GenerativeServiceClient", + "GenerativeServiceAsyncClient", "ModelServiceClient", "ModelServiceAsyncClient", "PermissionServiceClient", 
"PermissionServiceAsyncClient", + "RetrieverServiceClient", + "RetrieverServiceAsyncClient", "TextServiceClient", "TextServiceAsyncClient", "CitationMetadata", "CitationSource", + "Blob", + "Content", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", + "GroundingPassage", + "GroundingPassages", + "Part", + "Schema", + "Tool", + "Type", "CountMessageTokensRequest", "CountMessageTokensResponse", "Example", @@ -128,6 +227,23 @@ "GenerateMessageResponse", "Message", "MessagePrompt", + "AttributionSourceId", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "Candidate", + "ContentEmbedding", + "CountTokensRequest", + "CountTokensResponse", + "EmbedContentRequest", + "EmbedContentResponse", + "GenerateAnswerRequest", + "GenerateAnswerResponse", + "GenerateContentRequest", + "GenerateContentResponse", + "GenerationConfig", + "GroundingAttribution", + "SemanticRetrieverConfig", + "TaskType", "Model", "CreateTunedModelMetadata", "CreateTunedModelRequest", @@ -148,6 +264,42 @@ "TransferOwnershipRequest", "TransferOwnershipResponse", "UpdatePermissionRequest", + "Chunk", + "ChunkData", + "Condition", + "Corpus", + "CustomMetadata", + "Document", + "MetadataFilter", + "StringList", + "BatchCreateChunksRequest", + "BatchCreateChunksResponse", + "BatchDeleteChunksRequest", + "BatchUpdateChunksRequest", + "BatchUpdateChunksResponse", + "CreateChunkRequest", + "CreateCorpusRequest", + "CreateDocumentRequest", + "DeleteChunkRequest", + "DeleteCorpusRequest", + "DeleteDocumentRequest", + "GetChunkRequest", + "GetCorpusRequest", + "GetDocumentRequest", + "ListChunksRequest", + "ListChunksResponse", + "ListCorporaRequest", + "ListCorporaResponse", + "ListDocumentsRequest", + "ListDocumentsResponse", + "QueryCorpusRequest", + "QueryCorpusResponse", + "QueryDocumentRequest", + "QueryDocumentResponse", + "RelevantChunk", + "UpdateChunkRequest", + "UpdateCorpusRequest", + "UpdateDocumentRequest", "ContentFilter", "SafetyFeedback", "SafetyRating", diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py index 288d10b11145..360a0d13ebdd 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.3.5" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/__init__.py new file mode 100644 index 000000000000..ee4612eef573 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/__init__.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.ai.generativelanguage_v1 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.generative_service import ( + GenerativeServiceAsyncClient, + GenerativeServiceClient, +) +from .services.model_service import ModelServiceAsyncClient, ModelServiceClient +from .types.citation import CitationMetadata, CitationSource +from .types.content import Blob, Content, Part +from .types.generative_service import ( + BatchEmbedContentsRequest, + BatchEmbedContentsResponse, + Candidate, + ContentEmbedding, + CountTokensRequest, + CountTokensResponse, + EmbedContentRequest, + EmbedContentResponse, + GenerateContentRequest, + GenerateContentResponse, + GenerationConfig, + TaskType, +) +from .types.model import Model +from .types.model_service import GetModelRequest, ListModelsRequest, ListModelsResponse +from .types.safety import HarmCategory, SafetyRating, SafetySetting + +__all__ = ( + "GenerativeServiceAsyncClient", + "ModelServiceAsyncClient", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "Blob", + "Candidate", + "CitationMetadata", + "CitationSource", + "Content", + "ContentEmbedding", + "CountTokensRequest", + "CountTokensResponse", + "EmbedContentRequest", + "EmbedContentResponse", + "GenerateContentRequest", + "GenerateContentResponse", + "GenerationConfig", + "GenerativeServiceClient", + "GetModelRequest", + "HarmCategory", + "ListModelsRequest", + "ListModelsResponse", + "Model", + "ModelServiceClient", + "Part", + "SafetyRating", + "SafetySetting", + "TaskType", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_metadata.json b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_metadata.json new file mode 100644 index 000000000000..77717e17c5dc --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_metadata.json @@ -0,0 +1,152 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.ai.generativelanguage_v1", + "protoPackage": "google.ai.generativelanguage.v1", + "schema": "1.0", + "services": { + "GenerativeService": { + "clients": { + "grpc": { + "libraryClient": "GenerativeServiceClient", + "rpcs": { + "BatchEmbedContents": { + "methods": [ + "batch_embed_contents" + ] + }, + "CountTokens": { + "methods": [ + "count_tokens" + ] + }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, + "GenerateContent": { + "methods": [ + "generate_content" + ] + }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + } + } + }, + "grpc-async": { + "libraryClient": "GenerativeServiceAsyncClient", + "rpcs": { + "BatchEmbedContents": { + "methods": [ + "batch_embed_contents" + ] + }, + "CountTokens": { + "methods": [ + "count_tokens" + ] + }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, + "GenerateContent": { + "methods": [ + "generate_content" + ] + }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + } + } + }, + "rest": { + "libraryClient": "GenerativeServiceClient", + "rpcs": { + "BatchEmbedContents": { + "methods": [ + "batch_embed_contents" + ] + }, + "CountTokens": { + "methods": [ + "count_tokens" + ] + }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, + "GenerateContent": { + "methods": [ + "generate_content" + ] + }, + 
"StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + } + } + } + } + }, + "ModelService": { + "clients": { + "grpc": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "GetModel": { + "methods": [ + "get_model" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelServiceAsyncClient", + "rpcs": { + "GetModel": { + "methods": [ + "get_model" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + } + } + }, + "rest": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "GetModel": { + "methods": [ + "get_model" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + } + } + } + } + } + } +} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py new file mode 100644 index 000000000000..360a0d13ebdd --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/py.typed b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/py.typed new file mode 100644 index 000000000000..38773eee6363 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-ai-generativelanguage package uses inline types. diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/__init__.py new file mode 100644 index 000000000000..1e92ad575a7b --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import GenerativeServiceAsyncClient +from .client import GenerativeServiceClient + +__all__ = ( + "GenerativeServiceClient", + "GenerativeServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/async_client.py new file mode 100644 index 000000000000..155abc59f941 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/async_client.py @@ -0,0 +1,1069 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + AsyncIterable, + Awaitable, + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1.types import content +from google.ai.generativelanguage_v1.types import content as gag_content +from google.ai.generativelanguage_v1.types import generative_service + +from .client import GenerativeServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport +from .transports.grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport + + +class GenerativeServiceAsyncClient: + """API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. + """ + + _client: GenerativeServiceClient + + DEFAULT_ENDPOINT = GenerativeServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = GenerativeServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(GenerativeServiceClient.model_path) + parse_model_path = staticmethod(GenerativeServiceClient.parse_model_path) + common_billing_account_path = staticmethod( + GenerativeServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + GenerativeServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(GenerativeServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + GenerativeServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + GenerativeServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + GenerativeServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(GenerativeServiceClient.common_project_path) + parse_common_project_path = staticmethod( + GenerativeServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(GenerativeServiceClient.common_location_path) + parse_common_location_path = staticmethod( + GenerativeServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenerativeServiceAsyncClient: The constructed client. + """ + return GenerativeServiceClient.from_service_account_info.__func__(GenerativeServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. 
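Hedged usage sketch for the service-account classmethods shown here; the key file path is a placeholder, not part of this patch:

from google.ai import generativelanguage_v1 as glm

# Build clients from an explicit service-account key instead of ambient
# application-default credentials.
async_client = glm.GenerativeServiceAsyncClient.from_service_account_file("sa-key.json")
sync_client = glm.GenerativeServiceClient.from_service_account_json("sa-key.json")  # alias of from_service_account_file
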
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenerativeServiceAsyncClient: The constructed client. + """ + return GenerativeServiceClient.from_service_account_file.__func__(GenerativeServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return GenerativeServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> GenerativeServiceTransport: + """Returns the transport used by the client instance. + + Returns: + GenerativeServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(GenerativeServiceClient).get_transport_class, type(GenerativeServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GenerativeServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the generative service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.GenerativeServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = GenerativeServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateContentResponse: + r"""Generates a response from the model given an input + ``GenerateContentRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = await client.generate_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.GenerateContentRequest, dict]]): + The request object. Request to generate a completion from + the model. + model (:class:`str`): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.GenerateContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_content, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def stream_generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[generative_service.GenerateContentResponse]]: + r"""Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.GenerateContentRequest, dict]]): + The request object. Request to generate a completion from + the model. + model (:class:`str`): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.ai.generativelanguage_v1.types.GenerateContentResponse]: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.GenerateContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
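+        # Editorial note on the defaults configured just below: when the caller
+        # does not pass an explicit retry, ServiceUnavailable errors are retried
+        # with exponential backoff starting at 1.0s, multiplied by 1.3 up to a
+        # 10s cap, until the 60s deadline; the default per-call timeout is 60s.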
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stream_generate_content, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def embed_content( + self, + request: Optional[Union[generative_service.EmbedContentRequest, dict]] = None, + *, + model: Optional[str] = None, + content: Optional[gag_content.Content] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.EmbedContentResponse: + r"""Generates an embedding from the model given an input + ``Content``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_embed_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = await client.embed_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.EmbedContentRequest, dict]]): + The request object. Request containing the ``Content`` for the model to + embed. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + content (:class:`google.ai.generativelanguage_v1.types.Content`): + Required. The content to embed. Only the ``parts.text`` + fields will be counted. + + This corresponds to the ``content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.EmbedContentResponse: + The response to an EmbedContentRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
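+ # Mixing a full `request` object with the flattened `model`/`content`
+ # arguments is ambiguous, so it is rejected below with a ValueError.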
+ has_flattened_params = any([model, content]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.EmbedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if content is not None: + request.content = content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.embed_content, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_embed_contents( + self, + request: Optional[ + Union[generative_service.BatchEmbedContentsRequest, dict] + ] = None, + *, + model: Optional[str] = None, + requests: Optional[ + MutableSequence[generative_service.EmbedContentRequest] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.BatchEmbedContentsResponse: + r"""Generates multiple embeddings from the model given + input text in a synchronous call. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = await client.batch_embed_contents(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.BatchEmbedContentsRequest, dict]]): + The request object. Batch request to get embeddings from + the model for a list of prompts. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`MutableSequence[google.ai.generativelanguage_v1.types.EmbedContentRequest]`): + Required. Embed requests for the batch. 
The model in + each of these requests must match the model specified + ``BatchEmbedContentsRequest.model``. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.BatchEmbedContentsResponse: + The response to a BatchEmbedContentsRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.BatchEmbedContentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_embed_contents, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def count_tokens( + self, + request: Optional[Union[generative_service.CountTokensRequest, dict]] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.CountTokensResponse: + r"""Runs a model's tokenizer on input content and returns + the token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_count_tokens(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = await client.count_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.CountTokensRequest, dict]]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1.types.Content]`): + Required. The input given to the + model as a prompt. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.CountTokensResponse: + A response from CountTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.CountTokensRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.count_tokens, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
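+ # The CountTokensResponse carries the model's token count for the supplied prompt.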
+ return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
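+ # The returned Operation message reflects the latest known state of the
+ # long-running operation named in the request.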
+ return response + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def __aenter__(self) -> "GenerativeServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("GenerativeServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/client.py new file mode 100644 index 000000000000..8d13ebaceba5 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/client.py @@ -0,0 +1,1252 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import os
+import re
+from typing import (
+    Dict,
+    Iterable,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.ai.generativelanguage_v1 import gapic_version as package_version
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.longrunning import operations_pb2  # type: ignore
+
+from google.ai.generativelanguage_v1.types import content
+from google.ai.generativelanguage_v1.types import content as gag_content
+from google.ai.generativelanguage_v1.types import generative_service
+
+from .transports.base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport
+from .transports.grpc import GenerativeServiceGrpcTransport
+from .transports.grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport
+from .transports.rest import GenerativeServiceRestTransport
+
+
+class GenerativeServiceClientMeta(type):
+    """Metaclass for the GenerativeService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[GenerativeServiceTransport]]
+    _transport_registry["grpc"] = GenerativeServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = GenerativeServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = GenerativeServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[GenerativeServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class GenerativeServiceClient(metaclass=GenerativeServiceClientMeta):
+    """API for using Large Models that generate multimodal content
+    and have additional capabilities beyond text generation.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "generativelanguage.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenerativeServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenerativeServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GenerativeServiceTransport: + """Returns the transport used by the client instance. + + Returns: + GenerativeServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def model_path( + model: str, + ) -> str: + """Returns a fully-qualified model string.""" + return "models/{model}".format( + model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match(r"^models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, GenerativeServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the generative service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GenerativeServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GenerativeServiceTransport): + # transport is a GenerativeServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateContentResponse: + r"""Generates a response from the model given an input + ``GenerateContentRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = client.generate_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.GenerateContentRequest, dict]): + The request object. Request to generate a completion from + the model. + model (str): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.GenerateContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.GenerateContentRequest): + request = generative_service.GenerateContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_content] + + # Certain fields should be provided within the metadata header; + # add these here. 
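+ # The routing header lets the backend route the call based on the `model`
+ # resource named in the request.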
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def stream_generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[generative_service.GenerateContentResponse]: + r"""Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.GenerateContentRequest, dict]): + The request object. Request to generate a completion from + the model. + model (str): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.ai.generativelanguage_v1.types.GenerateContentResponse]: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. 
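+ # Either a GenerateContentRequest message or a plain dict is accepted and
+ # normalized into a request object below.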
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.GenerateContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.GenerateContentRequest): + request = generative_service.GenerateContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stream_generate_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def embed_content( + self, + request: Optional[Union[generative_service.EmbedContentRequest, dict]] = None, + *, + model: Optional[str] = None, + content: Optional[gag_content.Content] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.EmbedContentResponse: + r"""Generates an embedding from the model given an input + ``Content``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_embed_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = client.embed_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.EmbedContentRequest, dict]): + The request object. Request containing the ``Content`` for the model to + embed. + model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + content (google.ai.generativelanguage_v1.types.Content): + Required. The content to embed. Only the ``parts.text`` + fields will be counted. + + This corresponds to the ``content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.EmbedContentResponse: + The response to an EmbedContentRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, content]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.EmbedContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.EmbedContentRequest): + request = generative_service.EmbedContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if content is not None: + request.content = content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.embed_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_embed_contents( + self, + request: Optional[ + Union[generative_service.BatchEmbedContentsRequest, dict] + ] = None, + *, + model: Optional[str] = None, + requests: Optional[ + MutableSequence[generative_service.EmbedContentRequest] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.BatchEmbedContentsResponse: + r"""Generates multiple embeddings from the model given + input text in a synchronous call. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = client.batch_embed_contents(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.BatchEmbedContentsRequest, dict]): + The request object. Batch request to get embeddings from + the model for a list of prompts. 
+ model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (MutableSequence[google.ai.generativelanguage_v1.types.EmbedContentRequest]): + Required. Embed requests for the batch. The model in + each of these requests must match the model specified + ``BatchEmbedContentsRequest.model``. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.BatchEmbedContentsResponse: + The response to a BatchEmbedContentsRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.BatchEmbedContentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.BatchEmbedContentsRequest): + request = generative_service.BatchEmbedContentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_embed_contents] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def count_tokens( + self, + request: Optional[Union[generative_service.CountTokensRequest, dict]] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.CountTokensResponse: + r"""Runs a model's tokenizer on input content and returns + the token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_count_tokens(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = client.count_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.CountTokensRequest, dict]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]): + Required. The input given to the + model as a prompt. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.CountTokensResponse: + A response from CountTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.CountTokensRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.CountTokensRequest): + request = generative_service.CountTokensRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.count_tokens] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "GenerativeServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("GenerativeServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/__init__.py new file mode 100644 index 000000000000..1d35da543a1e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GenerativeServiceTransport +from .grpc import GenerativeServiceGrpcTransport +from .grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport +from .rest import GenerativeServiceRestInterceptor, GenerativeServiceRestTransport + +# Compile a registry of transports. 
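The ``count_tokens`` method shown earlier accepts either a fully built request object or the flattened ``model``/``contents`` arguments; passing both trips the ``has_flattened_params`` guard and raises ``ValueError``. A sketch of both call styles, again assuming environment credentials and an illustrative model name:

.. code-block:: python

    from google.ai import generativelanguage_v1
    from google.ai.generativelanguage_v1.types import Content, Part

    client = generativelanguage_v1.GenerativeServiceClient()
    contents = [Content(parts=[Part(text="Hello, world")])]

    # Style 1: build the request explicitly.
    request = generativelanguage_v1.CountTokensRequest(
        model="models/gemini-pro",  # illustrative model name
        contents=contents,
    )
    print(client.count_tokens(request=request))

    # Style 2: flattened fields. Mixing this with `request=` raises ValueError.
    print(client.count_tokens(model="models/gemini-pro", contents=contents))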
+_transport_registry = OrderedDict() # type: Dict[str, Type[GenerativeServiceTransport]] +_transport_registry["grpc"] = GenerativeServiceGrpcTransport +_transport_registry["grpc_asyncio"] = GenerativeServiceGrpcAsyncIOTransport +_transport_registry["rest"] = GenerativeServiceRestTransport + +__all__ = ( + "GenerativeServiceTransport", + "GenerativeServiceGrpcTransport", + "GenerativeServiceGrpcAsyncIOTransport", + "GenerativeServiceRestTransport", + "GenerativeServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/base.py new file mode 100644 index 000000000000..93ff928d4f57 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/base.py @@ -0,0 +1,299 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1 import gapic_version as package_version +from google.ai.generativelanguage_v1.types import generative_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class GenerativeServiceTransport(abc.ABC): + """Abstract transport class for GenerativeService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
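The ``_transport_registry`` above maps the keys ``"grpc"``, ``"grpc_asyncio"`` and ``"rest"`` to transport classes, and the base constructor resolves credentials and appends ``:443`` when the host carries no port. In practice a transport is usually selected by registry key at client-construction time; a sketch under the same credential assumptions as the earlier examples:

.. code-block:: python

    from google.ai import generativelanguage_v1

    # The synchronous client defaults to the gRPC transport.
    grpc_client = generativelanguage_v1.GenerativeServiceClient()

    # The registry key "rest" selects the REST transport instead.
    rest_client = generativelanguage_v1.GenerativeServiceClient(transport="rest")

    print(grpc_client.transport.kind)  # "grpc"
    print(rest_client.transport.kind)  # "rest"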
+ self._wrapped_methods = { + self.generate_content: gapic_v1.method.wrap_method( + self.generate_content, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.stream_generate_content: gapic_v1.method.wrap_method( + self.stream_generate_content, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.embed_content: gapic_v1.method.wrap_method( + self.embed_content, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.batch_embed_contents: gapic_v1.method.wrap_method( + self.batch_embed_contents, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.count_tokens: gapic_v1.method.wrap_method( + self.count_tokens, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
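Each entry in the table above wires in a default policy: retry on ``ServiceUnavailable`` with exponential backoff from 1.0s up to 10.0s (multiplier 1.3) under a 60s deadline, plus a 60s timeout. Both can be overridden per call with the same ``google.api_core`` primitives this module already imports; a sketch:

.. code-block:: python

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries

    from google.ai import generativelanguage_v1

    client = generativelanguage_v1.GenerativeServiceClient()
    request = generativelanguage_v1.CountTokensRequest(
        model="models/gemini-pro",  # illustrative model name
    )

    # A tighter policy than the generated default: also retry DeadlineExceeded,
    # back off faster, and give up after 30 seconds overall.
    custom_retry = retries.Retry(
        initial=0.5,
        maximum=5.0,
        multiplier=2.0,
        predicate=retries.if_exception_type(
            core_exceptions.ServiceUnavailable,
            core_exceptions.DeadlineExceeded,
        ),
        deadline=30.0,
    )

    print(client.count_tokens(request=request, retry=custom_retry, timeout=30.0))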
+ """ + raise NotImplementedError() + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Union[ + generative_service.GenerateContentResponse, + Awaitable[generative_service.GenerateContentResponse], + ], + ]: + raise NotImplementedError() + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Union[ + generative_service.GenerateContentResponse, + Awaitable[generative_service.GenerateContentResponse], + ], + ]: + raise NotImplementedError() + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + Union[ + generative_service.EmbedContentResponse, + Awaitable[generative_service.EmbedContentResponse], + ], + ]: + raise NotImplementedError() + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + Union[ + generative_service.BatchEmbedContentsResponse, + Awaitable[generative_service.BatchEmbedContentsResponse], + ], + ]: + raise NotImplementedError() + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], + Union[ + generative_service.CountTokensResponse, + Awaitable[generative_service.CountTokensResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("GenerativeServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc.py new file mode 100644 index 000000000000..61a0782d47fa --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc.py @@ -0,0 +1,443 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1.types import generative_service + +from .base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport + + +class GenerativeServiceGrpcTransport(GenerativeServiceTransport): + """gRPC backend transport for GenerativeService. + + API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. 
It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + r"""Return a callable for the generate content method over gRPC. + + Generates a response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + ~.GenerateContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_content" not in self._stubs: + self._stubs["generate_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/GenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["generate_content"] + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + ~.GenerateContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
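``create_channel`` above delegates to ``grpc_helpers.create_channel`` with the default host and scopes filled in, and the transport constructor ignores credential arguments whenever an explicit ``channel`` is supplied. A sketch of wiring a pre-built channel through the transport into the client, assuming environment credentials when the channel is created:

.. code-block:: python

    from google.ai import generativelanguage_v1
    from google.ai.generativelanguage_v1.services.generative_service.transports import (
        GenerativeServiceGrpcTransport,
    )

    # Credentials are resolved from the environment while building the channel.
    channel = GenerativeServiceGrpcTransport.create_channel(
        "generativelanguage.googleapis.com:443",
    )

    # With an explicit channel, the transport skips its own credential handling.
    transport = GenerativeServiceGrpcTransport(channel=channel)
    client = generativelanguage_v1.GenerativeServiceClient(transport=transport)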
+ if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.ai.generativelanguage.v1.GenerativeService/StreamGenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + generative_service.EmbedContentResponse, + ]: + r"""Return a callable for the embed content method over gRPC. + + Generates an embedding from the model given an input + ``Content``. + + Returns: + Callable[[~.EmbedContentRequest], + ~.EmbedContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "embed_content" not in self._stubs: + self._stubs["embed_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/EmbedContent", + request_serializer=generative_service.EmbedContentRequest.serialize, + response_deserializer=generative_service.EmbedContentResponse.deserialize, + ) + return self._stubs["embed_content"] + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + generative_service.BatchEmbedContentsResponse, + ]: + r"""Return a callable for the batch embed contents method over gRPC. + + Generates multiple embeddings from the model given + input text in a synchronous call. + + Returns: + Callable[[~.BatchEmbedContentsRequest], + ~.BatchEmbedContentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_embed_contents" not in self._stubs: + self._stubs["batch_embed_contents"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/BatchEmbedContents", + request_serializer=generative_service.BatchEmbedContentsRequest.serialize, + response_deserializer=generative_service.BatchEmbedContentsResponse.deserialize, + ) + return self._stubs["batch_embed_contents"] + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], generative_service.CountTokensResponse + ]: + r"""Return a callable for the count tokens method over gRPC. + + Runs a model's tokenizer on input content and returns + the token count. + + Returns: + Callable[[~.CountTokensRequest], + ~.CountTokensResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "count_tokens" not in self._stubs: + self._stubs["count_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/CountTokens", + request_serializer=generative_service.CountTokensRequest.serialize, + response_deserializer=generative_service.CountTokensResponse.deserialize, + ) + return self._stubs["count_tokens"] + + def close(self): + self.grpc_channel.close() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("GenerativeServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..4804c88a41a1 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/grpc_asyncio.py @@ -0,0 +1,443 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1.types import generative_service + +from .base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport +from .grpc import GenerativeServiceGrpcTransport + + +class GenerativeServiceGrpcAsyncIOTransport(GenerativeServiceTransport): + """gRPC AsyncIO backend transport for GenerativeService. + + API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Awaitable[generative_service.GenerateContentResponse], + ]: + r"""Return a callable for the generate content method over gRPC. + + Generates a response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + Awaitable[~.GenerateContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "generate_content" not in self._stubs: + self._stubs["generate_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/GenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["generate_content"] + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Awaitable[generative_service.GenerateContentResponse], + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + Awaitable[~.GenerateContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.ai.generativelanguage.v1.GenerativeService/StreamGenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + Awaitable[generative_service.EmbedContentResponse], + ]: + r"""Return a callable for the embed content method over gRPC. + + Generates an embedding from the model given an input + ``Content``. + + Returns: + Callable[[~.EmbedContentRequest], + Awaitable[~.EmbedContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "embed_content" not in self._stubs: + self._stubs["embed_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/EmbedContent", + request_serializer=generative_service.EmbedContentRequest.serialize, + response_deserializer=generative_service.EmbedContentResponse.deserialize, + ) + return self._stubs["embed_content"] + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + Awaitable[generative_service.BatchEmbedContentsResponse], + ]: + r"""Return a callable for the batch embed contents method over gRPC. + + Generates multiple embeddings from the model given + input text in a synchronous call. + + Returns: + Callable[[~.BatchEmbedContentsRequest], + Awaitable[~.BatchEmbedContentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_embed_contents" not in self._stubs: + self._stubs["batch_embed_contents"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/BatchEmbedContents", + request_serializer=generative_service.BatchEmbedContentsRequest.serialize, + response_deserializer=generative_service.BatchEmbedContentsResponse.deserialize, + ) + return self._stubs["batch_embed_contents"] + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], + Awaitable[generative_service.CountTokensResponse], + ]: + r"""Return a callable for the count tokens method over gRPC. + + Runs a model's tokenizer on input content and returns + the token count. + + Returns: + Callable[[~.CountTokensRequest], + Awaitable[~.CountTokensResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "count_tokens" not in self._stubs: + self._stubs["count_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.GenerativeService/CountTokens", + request_serializer=generative_service.CountTokensRequest.serialize, + response_deserializer=generative_service.CountTokensResponse.deserialize, + ) + return self._stubs["count_tokens"] + + def close(self): + return self.grpc_channel.close() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + +__all__ = ("GenerativeServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/rest.py new file mode 100644 index 000000000000..1630e5d48918 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/generative_service/transports/rest.py @@ -0,0 +1,1179 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1.types import generative_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import GenerativeServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class GenerativeServiceRestInterceptor: + """Interceptor for GenerativeService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the GenerativeServiceRestTransport. + + .. 
code-block:: python + class MyCustomGenerativeServiceInterceptor(GenerativeServiceRestInterceptor): + def pre_batch_embed_contents(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_embed_contents(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_count_tokens(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_count_tokens(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_embed_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_embed_content(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_content(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_stream_generate_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_stream_generate_content(self, response): + logging.log(f"Received response: {response}") + return response + + transport = GenerativeServiceRestTransport(interceptor=MyCustomGenerativeServiceInterceptor()) + client = GenerativeServiceClient(transport=transport) + + + """ + + def pre_batch_embed_contents( + self, + request: generative_service.BatchEmbedContentsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.BatchEmbedContentsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_embed_contents + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_batch_embed_contents( + self, response: generative_service.BatchEmbedContentsResponse + ) -> generative_service.BatchEmbedContentsResponse: + """Post-rpc interceptor for batch_embed_contents + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_count_tokens( + self, + request: generative_service.CountTokensRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.CountTokensRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for count_tokens + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_count_tokens( + self, response: generative_service.CountTokensResponse + ) -> generative_service.CountTokensResponse: + """Post-rpc interceptor for count_tokens + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_embed_content( + self, + request: generative_service.EmbedContentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.EmbedContentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for embed_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. 
+ """ + return request, metadata + + def post_embed_content( + self, response: generative_service.EmbedContentResponse + ) -> generative_service.EmbedContentResponse: + """Post-rpc interceptor for embed_content + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_generate_content( + self, + request: generative_service.GenerateContentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.GenerateContentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_generate_content( + self, response: generative_service.GenerateContentResponse + ) -> generative_service.GenerateContentResponse: + """Post-rpc interceptor for generate_content + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_stream_generate_content( + self, + request: generative_service.GenerateContentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.GenerateContentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for stream_generate_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_stream_generate_content( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for stream_generate_content + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class GenerativeServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: GenerativeServiceRestInterceptor + + +class GenerativeServiceRestTransport(GenerativeServiceTransport): + """REST backend transport for GenerativeService. + + API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[GenerativeServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or GenerativeServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _BatchEmbedContents(GenerativeServiceRestStub):
+        def __hash__(self):
+            return hash("BatchEmbedContents")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: generative_service.BatchEmbedContentsRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> generative_service.BatchEmbedContentsResponse:
+            r"""Call the batch embed contents method over HTTP.
+
+            Args:
+                request (~.generative_service.BatchEmbedContentsRequest):
+                    The request object. Batch request to get embeddings from
+                    the model for a list of prompts.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.generative_service.BatchEmbedContentsResponse:
+                    The response to a ``BatchEmbedContentsRequest``.
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{model=models/*}:batchEmbedContents", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_embed_contents( + request, metadata + ) + pb_request = generative_service.BatchEmbedContentsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.BatchEmbedContentsResponse() + pb_resp = generative_service.BatchEmbedContentsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_embed_contents(resp) + return resp + + class _CountTokens(GenerativeServiceRestStub): + def __hash__(self): + return hash("CountTokens") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.CountTokensRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.CountTokensResponse: + r"""Call the count tokens method over HTTP. + + Args: + request (~.generative_service.CountTokensRequest): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.CountTokensResponse: + A response from ``CountTokens``. + + It returns the model's ``token_count`` for the + ``prompt``. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{model=models/*}:countTokens", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_count_tokens(request, metadata) + pb_request = generative_service.CountTokensRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.CountTokensResponse() + pb_resp = generative_service.CountTokensResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_count_tokens(resp) + return resp + + class _EmbedContent(GenerativeServiceRestStub): + def __hash__(self): + return hash("EmbedContent") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.EmbedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.EmbedContentResponse: + r"""Call the embed content method over HTTP. + + Args: + request (~.generative_service.EmbedContentRequest): + The request object. Request containing the ``Content`` for the model to + embed. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.EmbedContentResponse: + The response to an ``EmbedContentRequest``. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{model=models/*}:embedContent", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_embed_content(request, metadata) + pb_request = generative_service.EmbedContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.EmbedContentResponse() + pb_resp = generative_service.EmbedContentResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_embed_content(resp) + return resp + + class _GenerateContent(GenerativeServiceRestStub): + def __hash__(self): + return hash("GenerateContent") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.GenerateContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateContentResponse: + r"""Call the generate content method over HTTP. + + Args: + request (~.generative_service.GenerateContentRequest): + The request object. Request to generate a completion from + the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They are + reported for both prompt in + ``GenerateContentResponse.prompt_feedback`` and for each + candidate in ``finish_reason`` and in + ``safety_ratings``. The API contract is that: + + - either all requested candidates are returned or no + candidates at all + - no candidates are returned only if there was + something wrong with the prompt (see + ``prompt_feedback``) + - feedback on each candidate is reported on + ``finish_reason`` and ``safety_ratings``. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{model=models/*}:generateContent", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_generate_content( + request, metadata + ) + pb_request = generative_service.GenerateContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.GenerateContentResponse() + pb_resp = generative_service.GenerateContentResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_content(resp) + return resp + + class _StreamGenerateContent(GenerativeServiceRestStub): + def __hash__(self): + return hash("StreamGenerateContent") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.GenerateContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the stream generate content method over HTTP. + + Args: + request (~.generative_service.GenerateContentRequest): + The request object. Request to generate a completion from + the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They are + reported for both prompt in + ``GenerateContentResponse.prompt_feedback`` and for each + candidate in ``finish_reason`` and in + ``safety_ratings``. The API contract is that: + + - either all requested candidates are returned or no + candidates at all + - no candidates are returned only if there was + something wrong with the prompt (see + ``prompt_feedback``) + - feedback on each candidate is reported on + ``finish_reason`` and ``safety_ratings``. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{model=models/*}:streamGenerateContent", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_stream_generate_content( + request, metadata + ) + pb_request = generative_service.GenerateContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, generative_service.GenerateContentResponse + ) + resp = self._interceptor.post_stream_generate_content(resp) + return resp + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + generative_service.BatchEmbedContentsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchEmbedContents(self._session, self._host, self._interceptor) # type: ignore + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], generative_service.CountTokensResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CountTokens(self._session, self._host, self._interceptor) # type: ignore + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + generative_service.EmbedContentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._EmbedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
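Taken together, ``_StreamGenerateContent`` transcodes the request, posts it to ``:streamGenerateContent``, and wraps the raw HTTP response in ``rest_streaming.ResponseIterator``, so callers receive an iterator of ``GenerateContentResponse`` chunks. A usage sketch against the REST transport; the model name and prompt are placeholders.

.. code-block:: python

    from google.ai import generativelanguage_v1

    client = generativelanguage_v1.GenerativeServiceClient(transport="rest")

    request = generativelanguage_v1.GenerateContentRequest(
        model="models/some-model",  # placeholder model resource name
        contents=[{"parts": [{"text": "Write a haiku about the sea."}]}],
    )

    # Each iteration yields one GenerateContentResponse decoded from the
    # streamed HTTP body by rest_streaming.ResponseIterator.
    for chunk in client.stream_generate_content(request=request):
        print(chunk.candidates[0].content.parts[0].text)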
+ # In C++ this would require a dynamic_cast + return self._StreamGenerateContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(GenerativeServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=tunedModels/*/operations/*}:cancel", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(GenerativeServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=tunedModels/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(GenerativeServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=operations}", + }, + { + "method": "get", + "uri": "/v1/{name=tunedModels/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("GenerativeServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/__init__.py new file mode 100644 index 000000000000..5738b8bf4239 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import ModelServiceAsyncClient +from .client import ModelServiceClient + +__all__ = ( + "ModelServiceClient", + "ModelServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/async_client.py new file mode 100644 index 000000000000..78a5e79f603b --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/async_client.py @@ -0,0 +1,625 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1.services.model_service import pagers +from google.ai.generativelanguage_v1.types import model, model_service + +from .client import ModelServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +class ModelServiceAsyncClient: + """Provides methods for getting metadata information about + Generative Models. + """ + + _client: ModelServiceClient + + DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(ModelServiceClient.model_path) + parse_model_path = staticmethod(ModelServiceClient.parse_model_path) + common_billing_account_path = staticmethod( + ModelServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ModelServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(ModelServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(ModelServiceClient.common_organization_path) + parse_common_organization_path = staticmethod( + ModelServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(ModelServiceClient.common_project_path) + parse_common_project_path = staticmethod( + ModelServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(ModelServiceClient.common_location_path) + parse_common_location_path = staticmethod( + ModelServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. 
+ """ + return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(ModelServiceClient).get_transport_class, type(ModelServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ModelServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ModelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def get_model( + self, + request: Optional[Union[model_service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets information about a specific Model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_get_model(): + # Create a client + client = generativelanguage_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.GetModelRequest, dict]]): + The request object. Request for getting information about + a specific Model. + name (:class:`str`): + Required. The resource name of the model. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.Model: + Information about a Generative + Language Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models( + self, + request: Optional[Union[model_service.ListModelsRequest, dict]] = None, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists models available through the API. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + async def sample_list_models(): + # Create a client + client = generativelanguage_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1.types.ListModelsRequest, dict]]): + The request object. Request for listing all Models. + page_size (:class:`int`): + The maximum number of ``Models`` to return (per page). + + The service may return fewer models. If unspecified, at + most 50 models will be returned per page. This method + returns at most 1000 models per page, even if you pass a + larger page_size. + + This corresponds to the ``page_size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + page_token (:class:`str`): + A page token, received from a previous ``ListModels`` + call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListModels`` must match the call that provided the + page token. + + This corresponds to the ``page_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.services.model_service.pagers.ListModelsAsyncPager: + Response from ListModel containing a paginated list of + Models. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([page_size, page_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if page_size is not None: + request.page_size = page_size + if page_token is not None: + request.page_token = page_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def __aenter__(self) -> "ModelServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ModelServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/client.py new file mode 100644 index 000000000000..e806af2e2aec --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/client.py @@ -0,0 +1,856 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1.services.model_service import pagers +from google.ai.generativelanguage_v1.types import model, model_service + +from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport +from .transports.grpc import ModelServiceGrpcTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .transports.rest import ModelServiceRestTransport + + +class ModelServiceClientMeta(type): + """Metaclass for the ModelService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] + _transport_registry["grpc"] = ModelServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport + _transport_registry["rest"] = ModelServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[ModelServiceTransport]: + """Returns an appropriate transport class. 
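``ModelServiceClientMeta`` keeps a registry keyed by the transport labels ``"grpc"``, ``"grpc_asyncio"`` and ``"rest"``, and ``get_transport_class`` looks a label up, falling back to the first entry when none is given. A small illustrative sketch of how that surfaces to callers:

.. code-block:: python

    from google.ai import generativelanguage_v1

    # Resolve a transport class by label; omitting the label returns the
    # default, which is the first registry entry (grpc).
    rest_cls = generativelanguage_v1.ModelServiceClient.get_transport_class("rest")
    print(rest_cls.__name__)  # ModelServiceRestTransport

    # The same label can be passed straight to the client constructor.
    client = generativelanguage_v1.ModelServiceClient(transport="rest")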
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ModelServiceClient(metaclass=ModelServiceClientMeta):
+    """Provides methods for getting metadata information about
+    Generative Models.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ModelServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            ModelServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def model_path(
+        model: str,
+    ) -> str:
+        """Returns a fully-qualified model string."""
+        return "models/{model}".format(
+            model=model,
+        )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str, str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` if provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+        if use_client_cert not in ("true", "false"):
+            raise ValueError(
+                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+            )
+        if use_mtls_endpoint not in ("auto", "never", "always"):
+            raise MutualTLSChannelError(
+                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+            )
+
+        # Figure out the client cert source to use.
+        client_cert_source = None
+        if use_client_cert == "true":
+            if client_options.client_cert_source:
+                client_cert_source = client_options.client_cert_source
+            elif mtls.has_default_client_cert_source():
+                client_cert_source = mtls.default_client_cert_source()
+
+        # Figure out which api endpoint to use.
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        elif use_mtls_endpoint == "always" or (
+            use_mtls_endpoint == "auto" and client_cert_source
+        ):
+            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+        else:
+            api_endpoint = cls.DEFAULT_ENDPOINT
+
+        return api_endpoint, client_cert_source
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        transport: Optional[Union[str, ModelServiceTransport]] = None,
+        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiates the model service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ModelServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ModelServiceTransport): + # transport is a ModelServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def get_model( + self, + request: Optional[Union[model_service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets information about a specific Model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_get_model(): + # Create a client + client = generativelanguage_v1.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.GetModelRequest, dict]): + The request object. Request for getting information about + a specific Model. + name (str): + Required. The resource name of the model. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.types.Model: + Information about a Generative + Language Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelRequest): + request = model_service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models( + self, + request: Optional[Union[model_service.ListModelsRequest, dict]] = None, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists models available through the API. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1 + + def sample_list_models(): + # Create a client + client = generativelanguage_v1.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1.types.ListModelsRequest, dict]): + The request object. Request for listing all Models. + page_size (int): + The maximum number of ``Models`` to return (per page). + + The service may return fewer models. If unspecified, at + most 50 models will be returned per page. This method + returns at most 1000 models per page, even if you pass a + larger page_size. + + This corresponds to the ``page_size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + page_token (str): + A page token, received from a previous ``ListModels`` + call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListModels`` must match the call that provided the + page token. + + This corresponds to the ``page_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1.services.model_service.pagers.ListModelsPager: + Response from ListModel containing a paginated list of + Models. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([page_size, page_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelsRequest): + request = model_service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if page_size is not None: + request.page_size = page_size + if page_token is not None: + request.page_token = page_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "ModelServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ModelServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/pagers.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/pagers.py new file mode 100644 index 000000000000..036124acfb49 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/pagers.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, +) + +from google.ai.generativelanguage_v1.types import model, model_service + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_service.ListModelsResponse], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1.types.ListModelsRequest): + The initial request object. + response (google.ai.generativelanguage_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[model_service.ListModelsResponse]], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1.types.ListModelsRequest): + The initial request object. 
+ response (google.ai.generativelanguage_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/__init__.py new file mode 100644 index 000000000000..1b430a25489e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelServiceTransport +from .grpc import ModelServiceGrpcTransport +from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .rest import ModelServiceRestInterceptor, ModelServiceRestTransport + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] +_transport_registry["grpc"] = ModelServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport +_transport_registry["rest"] = ModelServiceRestTransport + +__all__ = ( + "ModelServiceTransport", + "ModelServiceGrpcTransport", + "ModelServiceGrpcAsyncIOTransport", + "ModelServiceRestTransport", + "ModelServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/base.py new file mode 100644 index 000000000000..7c5be644584f --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/base.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1 import gapic_version as package_version +from google.ai.generativelanguage_v1.types import model, model_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class ModelServiceTransport(abc.ABC): + """Abstract transport class for ModelService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_timeout=None, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def get_model( + self, + ) -> Callable[ + [model_service.GetModelRequest], Union[model.Model, Awaitable[model.Model]] + ]: + raise NotImplementedError() + + @property + def list_models( + self, + ) -> Callable[ + [model_service.ListModelsRequest], + Union[ + model_service.ListModelsResponse, + Awaitable[model_service.ListModelsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("ModelServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc.py new file mode 100644 index 000000000000..cbd253b4ec85 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1.types import model, model_service + +from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport + + +class ModelServiceGrpcTransport(ModelServiceTransport): + """gRPC backend transport for ModelService. + + Provides methods for getting metadata information about + Generative Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets information about a specific Model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.ModelService/GetModel", + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists models available through the API. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.ModelService/ListModels", + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + def close(self): + self.grpc_channel.close() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("ModelServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..47ff53a30928 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/grpc_asyncio.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1.types import model, model_service + +from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport +from .grpc import ModelServiceGrpcTransport + + +class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): + """gRPC AsyncIO backend transport for ModelService. + + Provides methods for getting metadata information about + Generative Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def get_model( + self, + ) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets information about a specific Model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.ModelService/GetModel", + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[ + [model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse] + ]: + r"""Return a callable for the list models method over gRPC. + + Lists models available through the API. 
+ + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1.ModelService/ListModels", + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + def close(self): + return self.grpc_channel.close() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + +__all__ = ("ModelServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/rest.py new file mode 100644 index 000000000000..fd9c2c508cd2 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/services/model_service/transports/rest.py @@ -0,0 +1,688 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1.types import model, model_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import ModelServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ModelServiceRestInterceptor: + """Interceptor for ModelService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ModelServiceRestTransport. + + .. 
code-block:: python + class MyCustomModelServiceInterceptor(ModelServiceRestInterceptor): + def pre_get_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_models(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_models(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ModelServiceRestTransport(interceptor=MyCustomModelServiceInterceptor()) + client = ModelServiceClient(transport=transport) + + + """ + + def pre_get_model( + self, + request: model_service.GetModelRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.GetModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_get_model(self, response: model.Model) -> model.Model: + """Post-rpc interceptor for get_model + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_list_models( + self, + request: model_service.ListModelsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.ListModelsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_models + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_list_models( + self, response: model_service.ListModelsResponse + ) -> model_service.ListModelsResponse: + """Post-rpc interceptor for list_models + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. 
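[Editor's note] Building on the docstring example above, a custom interceptor only needs to override the hooks it cares about, and each hook must return the (possibly modified) values it received. A minimal logging sketch, assuming application default credentials and the import paths introduced by this file:

import logging

from google.ai.generativelanguage_v1 import ModelServiceClient
from google.ai.generativelanguage_v1.services.model_service.transports.rest import (
    ModelServiceRestInterceptor,
    ModelServiceRestTransport,
)


class LoggingModelServiceInterceptor(ModelServiceRestInterceptor):
    def pre_list_models(self, request, metadata):
        # Inspect (or modify) the outgoing request and metadata.
        logging.info("ListModels request: page_size=%s", request.page_size)
        return request, metadata

    def post_list_models(self, response):
        # Inspect (or modify) the response before it reaches user code.
        logging.info("ListModels returned %d models", len(response.models))
        return response


transport = ModelServiceRestTransport(interceptor=LoggingModelServiceInterceptor())
client = ModelServiceClient(transport=transport)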
+ """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class ModelServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ModelServiceRestInterceptor + + +class ModelServiceRestTransport(ModelServiceTransport): + """REST backend transport for ModelService. + + Provides methods for getting metadata information about + Generative Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ModelServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or ModelServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _GetModel(ModelServiceRestStub):
+        def __hash__(self):
+            return hash("GetModel")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: model_service.GetModelRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> model.Model:
+            r"""Call the get model method over HTTP.
+
+            Args:
+                request (~.model_service.GetModelRequest):
+                    The request object. Request for getting information about
+                    a specific Model.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.model.Model:
+                    Information about a Generative
+                Language Model.
+
+            """
+
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "get",
+                    "uri": "/v1/{name=models/*}",
+                },
+            ]
+            request, metadata = self._interceptor.pre_get_model(request, metadata)
+            pb_request = model_service.GetModelRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+
+            # Jsonify the query params
+            query_params = json.loads(
+                json_format.MessageToJson(
+                    transcoded_request["query_params"],
+                    including_default_value_fields=False,
+                    use_integers_for_enums=True,
+                )
+            )
+            query_params.update(self._get_unset_required_fields(query_params))
+
+            query_params["$alt"] = "json;enum-encoding=int"
+
+            # Send the request
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(self._session, method)(
+                "{host}{uri}".format(host=self._host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model.Model() + pb_resp = model.Model.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model(resp) + return resp + + class _ListModels(ModelServiceRestStub): + def __hash__(self): + return hash("ListModels") + + def __call__( + self, + request: model_service.ListModelsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_service.ListModelsResponse: + r"""Call the list models method over HTTP. + + Args: + request (~.model_service.ListModelsRequest): + The request object. Request for listing all Models. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_service.ListModelsResponse: + Response from ``ListModel`` containing a paginated list + of Models. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/models", + }, + ] + request, metadata = self._interceptor.pre_list_models(request, metadata) + pb_request = model_service.ListModelsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_service.ListModelsResponse() + pb_resp = model_service.ListModelsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_models(resp) + return resp + + @property + def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_models( + self, + ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListModels(self._session, self._host, self._interceptor) # type: ignore + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(ModelServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=tunedModels/*/operations/*}:cancel", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(ModelServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
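[Editor's note] Each REST stub above funnels its request through ``google.api_core.path_template.transcode``, which matches the request against the ``http_options`` rules and splits it into URI, HTTP method, body, and query parameters. A small illustration of that helper in isolation, using the GetModel rule from this file; the model name is illustrative:

from google.api_core import path_template

http_options = [{"method": "get", "uri": "/v1/{name=models/*}"}]
transcoded = path_template.transcode(http_options, name="models/chat-bison-001")

print(transcoded["method"])        # get
print(transcoded["uri"])           # /v1/models/chat-bison-001
print(transcoded["query_params"])  # {} -- any fields not bound to the path land here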
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=tunedModels/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(ModelServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=operations}", + }, + { + "method": "get", + "uri": "/v1/{name=tunedModels/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("ModelServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/__init__.py new file mode 100644 index 000000000000..88334b0f1dc4 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/__init__.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .citation import CitationMetadata, CitationSource +from .content import Blob, Content, Part +from .generative_service import ( + BatchEmbedContentsRequest, + BatchEmbedContentsResponse, + Candidate, + ContentEmbedding, + CountTokensRequest, + CountTokensResponse, + EmbedContentRequest, + EmbedContentResponse, + GenerateContentRequest, + GenerateContentResponse, + GenerationConfig, + TaskType, +) +from .model import Model +from .model_service import GetModelRequest, ListModelsRequest, ListModelsResponse +from .safety import HarmCategory, SafetyRating, SafetySetting + +__all__ = ( + "CitationMetadata", + "CitationSource", + "Blob", + "Content", + "Part", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "Candidate", + "ContentEmbedding", + "CountTokensRequest", + "CountTokensResponse", + "EmbedContentRequest", + "EmbedContentResponse", + "GenerateContentRequest", + "GenerateContentResponse", + "GenerationConfig", + "TaskType", + "Model", + "GetModelRequest", + "ListModelsRequest", + "ListModelsResponse", + "SafetyRating", + "SafetySetting", + "HarmCategory", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/citation.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/citation.py new file mode 100644 index 000000000000..b3c3a6ff6681 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/citation.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
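[Editor's note] Because ``types/__init__.py`` above re-exports every message listed in its ``__all__``, the flat import path and the per-module path resolve to the same proto-plus classes. A quick sketch of that equivalence:

from google.ai.generativelanguage_v1.types import Model
from google.ai.generativelanguage_v1.types.model import Model as ModelFromModule

# Both names refer to the same generated proto-plus message class.
assert Model is ModelFromModule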
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1", + manifest={ + "CitationMetadata", + "CitationSource", + }, +) + + +class CitationMetadata(proto.Message): + r"""A collection of source attributions for a piece of content. + + Attributes: + citation_sources (MutableSequence[google.ai.generativelanguage_v1.types.CitationSource]): + Citations to sources for a specific response. + """ + + citation_sources: MutableSequence["CitationSource"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="CitationSource", + ) + + +class CitationSource(proto.Message): + r"""A citation to a source for a portion of a specific response. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + start_index (int): + Optional. Start of segment of the response + that is attributed to this source. + + Index indicates the start of the segment, + measured in bytes. + + This field is a member of `oneof`_ ``_start_index``. + end_index (int): + Optional. End of the attributed segment, + exclusive. + + This field is a member of `oneof`_ ``_end_index``. + uri (str): + Optional. URI that is attributed as a source + for a portion of the text. + + This field is a member of `oneof`_ ``_uri``. + license_ (str): + Optional. License for the GitHub project that + is attributed as a source for segment. + + License info is required for code citations. + + This field is a member of `oneof`_ ``_license``. + """ + + start_index: int = proto.Field( + proto.INT32, + number=1, + optional=True, + ) + end_index: int = proto.Field( + proto.INT32, + number=2, + optional=True, + ) + uri: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + license_: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/content.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/content.py new file mode 100644 index 000000000000..067597740b04 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/content.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1", + manifest={ + "Content", + "Part", + "Blob", + }, +) + + +class Content(proto.Message): + r"""The base structured datatype containing multi-part content of a + message. + + A ``Content`` includes a ``role`` field designating the producer of + the ``Content`` and a ``parts`` field containing multi-part data + that contains the content of the message turn. 
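[Editor's note] The citation messages above use proto3 ``optional`` fields, so field presence is tracked explicitly rather than inferred from default values. A small sketch constructing a source attribution by hand; the URI is illustrative:

from google.ai.generativelanguage_v1.types import CitationMetadata, CitationSource

source = CitationSource(
    start_index=0,
    end_index=42,
    uri="https://github.com/example/project",
    license_="Apache-2.0",
)
metadata = CitationMetadata(citation_sources=[source])

# proto-plus exposes presence of optional fields via the `in` operator.
assert "end_index" in source
print(len(metadata.citation_sources))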
+ + Attributes: + parts (MutableSequence[google.ai.generativelanguage_v1.types.Part]): + Ordered ``Parts`` that constitute a single message. Parts + may have different MIME types. + role (str): + Optional. The producer of the content. Must + be either 'user' or 'model'. + Useful to set for multi-turn conversations, + otherwise can be left blank or unset. + """ + + parts: MutableSequence["Part"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Part", + ) + role: str = proto.Field( + proto.STRING, + number=2, + ) + + +class Part(proto.Message): + r"""A datatype containing media that is part of a multi-part ``Content`` + message. + + A ``Part`` consists of data which has an associated datatype. A + ``Part`` can only contain one of the accepted types in + ``Part.data``. + + A ``Part`` must have a fixed IANA MIME type identifying the type and + subtype of the media if the ``inline_data`` field is filled with raw + bytes. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text (str): + Inline text. + + This field is a member of `oneof`_ ``data``. + inline_data (google.ai.generativelanguage_v1.types.Blob): + Inline media bytes. + + This field is a member of `oneof`_ ``data``. + """ + + text: str = proto.Field( + proto.STRING, + number=2, + oneof="data", + ) + inline_data: "Blob" = proto.Field( + proto.MESSAGE, + number=3, + oneof="data", + message="Blob", + ) + + +class Blob(proto.Message): + r"""Raw media bytes. + + Text should not be sent as raw bytes, use the 'text' field. + + Attributes: + mime_type (str): + The IANA standard MIME type of the source + data. Accepted types include: "image/png", + "image/jpeg", "image/heic", "image/heif", + "image/webp". + data (bytes): + Raw bytes for media formats. + """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + ) + data: bytes = proto.Field( + proto.BYTES, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/generative_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/generative_service.py new file mode 100644 index 000000000000..596c7579b81c --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/generative_service.py @@ -0,0 +1,597 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
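[Editor's note] Putting the three messages above together: a ``Content`` holds ordered ``Part``s, and each ``Part`` carries exactly one member of the ``data`` oneof. A minimal construction sketch; the image bytes are placeholders:

from google.ai.generativelanguage_v1.types import Blob, Content, Part

content = Content(
    role="user",
    parts=[
        Part(text="Describe this image in one sentence."),
        Part(inline_data=Blob(mime_type="image/png", data=b"<png bytes here>")),
    ],
)

# Setting one member of the `data` oneof clears the other on that Part.
assert content.parts[1].inline_data.mime_type == "image/png"
print(len(content.parts))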
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.ai.generativelanguage_v1.types import citation +from google.ai.generativelanguage_v1.types import content as gag_content +from google.ai.generativelanguage_v1.types import safety + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1", + manifest={ + "TaskType", + "GenerateContentRequest", + "GenerationConfig", + "GenerateContentResponse", + "Candidate", + "EmbedContentRequest", + "ContentEmbedding", + "EmbedContentResponse", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "CountTokensRequest", + "CountTokensResponse", + }, +) + + +class TaskType(proto.Enum): + r"""Type of task for which the embedding will be used. + + Values: + TASK_TYPE_UNSPECIFIED (0): + Unset value, which will default to one of the + other enum values. + RETRIEVAL_QUERY (1): + Specifies the given text is a query in a + search/retrieval setting. + RETRIEVAL_DOCUMENT (2): + Specifies the given text is a document from + the corpus being searched. + SEMANTIC_SIMILARITY (3): + Specifies the given text will be used for + STS. + CLASSIFICATION (4): + Specifies that the given text will be + classified. + CLUSTERING (5): + Specifies that the embeddings will be used + for clustering. + """ + TASK_TYPE_UNSPECIFIED = 0 + RETRIEVAL_QUERY = 1 + RETRIEVAL_DOCUMENT = 2 + SEMANTIC_SIMILARITY = 3 + CLASSIFICATION = 4 + CLUSTERING = 5 + + +class GenerateContentRequest(proto.Message): + r"""Request to generate a completion from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + model (str): + Required. The name of the ``Model`` to use for generating + the completion. + + Format: ``name=models/{model}``. + contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a single + instance. For multi-turn queries, this is a + repeated field that contains conversation + history + latest request. + safety_settings (MutableSequence[google.ai.generativelanguage_v1.types.SafetySetting]): + Optional. A list of unique ``SafetySetting`` instances for + blocking unsafe content. + + This will be enforced on the + ``GenerateContentRequest.contents`` and + ``GenerateContentResponse.candidates``. There should not be + more than one setting for each ``SafetyCategory`` type. The + API will block any contents and responses that fail to meet + the thresholds set by these settings. This list overrides + the default settings for each ``SafetyCategory`` specified + in the safety_settings. If there is no ``SafetySetting`` for + a given ``SafetyCategory`` provided in the list, the API + will use the default safety setting for that category. Harm + categories HARM_CATEGORY_HATE_SPEECH, + HARM_CATEGORY_SEXUALLY_EXPLICIT, + HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT + are supported. + generation_config (google.ai.generativelanguage_v1.types.GenerationConfig): + Optional. Configuration options for model + generation and outputs. + + This field is a member of `oneof`_ ``_generation_config``. 
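[Editor's note] The request documented above (its field definitions follow) is what the generative service accepts. A sketch of building one by hand, assuming the package also exposes a ``GenerativeServiceClient`` alongside the model service shown in this PR and that credentials come from the environment; the model name is illustrative:

from google.ai.generativelanguage_v1 import GenerativeServiceClient
from google.ai.generativelanguage_v1.types import (
    Content,
    GenerateContentRequest,
    HarmCategory,
    Part,
    SafetySetting,
)

client = GenerativeServiceClient()
request = GenerateContentRequest(
    model="models/gemini-pro",  # illustrative model name
    contents=[Content(role="user", parts=[Part(text="Write a haiku about the sea.")])],
    safety_settings=[
        SafetySetting(
            category=HarmCategory.HARM_CATEGORY_HARASSMENT,
            threshold=SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        )
    ],
)
response = client.generate_content(request=request)
print(response.candidates[0].content.parts[0].text)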
+ """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + contents: MutableSequence[gag_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=safety.SafetySetting, + ) + generation_config: "GenerationConfig" = proto.Field( + proto.MESSAGE, + number=4, + optional=True, + message="GenerationConfig", + ) + + +class GenerationConfig(proto.Message): + r"""Configuration options for model generation and outputs. Not + all parameters may be configurable for every model. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + candidate_count (int): + Optional. Number of generated responses to return. + + This value must be between [1, 8], inclusive. If unset, this + will default to 1. + + This field is a member of `oneof`_ ``_candidate_count``. + stop_sequences (MutableSequence[str]): + Optional. The set of character sequences (up + to 5) that will stop output generation. If + specified, the API will stop at the first + appearance of a stop sequence. The stop sequence + will not be included as part of the response. + max_output_tokens (int): + Optional. The maximum number of tokens to include in a + candidate. + + If unset, this will default to output_token_limit specified + in the ``Model`` specification. + + This field is a member of `oneof`_ ``_max_output_tokens``. + temperature (float): + Optional. Controls the randomness of the output. Note: The + default value varies by model, see the ``Model.temperature`` + attribute of the ``Model`` returned the ``getModel`` + function. + + Values can range from [0.0,1.0], inclusive. A value closer + to 1.0 will produce responses that are more varied and + creative, while a value closer to 0.0 will typically result + in more straightforward responses from the model. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + Optional. The maximum cumulative probability of tokens to + consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Tokens are sorted based on their assigned probabilities so + that only the most likely tokens are considered. Top-k + sampling directly limits the maximum number of tokens to + consider, while Nucleus sampling limits number of tokens + based on the cumulative probability. + + Note: The default value varies by model, see the + ``Model.top_p`` attribute of the ``Model`` returned the + ``getModel`` function. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. Defaults to 40. + + Note: The default value varies by model, see the + ``Model.top_k`` attribute of the ``Model`` returned the + ``getModel`` function. + + This field is a member of `oneof`_ ``_top_k``. 
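[Editor's note] A ``GenerationConfig`` mirrors the attributes documented above (the field definitions follow); optional fields left unset fall back to the per-model defaults. A small construction sketch:

from google.ai.generativelanguage_v1.types import GenerationConfig

config = GenerationConfig(
    candidate_count=1,
    max_output_tokens=256,
    temperature=0.2,
    top_p=0.95,
    top_k=40,
    stop_sequences=["\n\n"],
)

# Optional fields track presence, so the backend can tell "unset" from "0.0".
assert "temperature" in config and "candidate_count" in config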
+ """ + + candidate_count: int = proto.Field( + proto.INT32, + number=1, + optional=True, + ) + stop_sequences: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + max_output_tokens: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + temperature: float = proto.Field( + proto.FLOAT, + number=5, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=6, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=7, + optional=True, + ) + + +class GenerateContentResponse(proto.Message): + r"""Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They are reported for + both prompt in ``GenerateContentResponse.prompt_feedback`` and for + each candidate in ``finish_reason`` and in ``safety_ratings``. The + API contract is that: + + - either all requested candidates are returned or no candidates at + all + - no candidates are returned only if there was something wrong with + the prompt (see ``prompt_feedback``) + - feedback on each candidate is reported on ``finish_reason`` and + ``safety_ratings``. + + Attributes: + candidates (MutableSequence[google.ai.generativelanguage_v1.types.Candidate]): + Candidate responses from the model. + prompt_feedback (google.ai.generativelanguage_v1.types.GenerateContentResponse.PromptFeedback): + Returns the prompt's feedback related to the + content filters. + """ + + class PromptFeedback(proto.Message): + r"""A set of the feedback metadata the prompt specified in + ``GenerateContentRequest.content``. + + Attributes: + block_reason (google.ai.generativelanguage_v1.types.GenerateContentResponse.PromptFeedback.BlockReason): + Optional. If set, the prompt was blocked and + no candidates are returned. Rephrase your + prompt. + safety_ratings (MutableSequence[google.ai.generativelanguage_v1.types.SafetyRating]): + Ratings for safety of the prompt. + There is at most one rating per category. + """ + + class BlockReason(proto.Enum): + r"""Specifies what was the reason why prompt was blocked. + + Values: + BLOCK_REASON_UNSPECIFIED (0): + Default value. This value is unused. + SAFETY (1): + Prompt was blocked due to safety reasons. You can inspect + ``safety_ratings`` to understand which safety category + blocked it. + OTHER (2): + Prompt was blocked due to unknown reaasons. + """ + BLOCK_REASON_UNSPECIFIED = 0 + SAFETY = 1 + OTHER = 2 + + block_reason: "GenerateContentResponse.PromptFeedback.BlockReason" = ( + proto.Field( + proto.ENUM, + number=1, + enum="GenerateContentResponse.PromptFeedback.BlockReason", + ) + ) + safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=safety.SafetyRating, + ) + + candidates: MutableSequence["Candidate"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Candidate", + ) + prompt_feedback: PromptFeedback = proto.Field( + proto.MESSAGE, + number=2, + message=PromptFeedback, + ) + + +class Candidate(proto.Message): + r"""A response candidate generated from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + index (int): + Output only. Index of the candidate in the + list of candidates. + + This field is a member of `oneof`_ ``_index``. + content (google.ai.generativelanguage_v1.types.Content): + Output only. Generated content returned from + the model. + finish_reason (google.ai.generativelanguage_v1.types.Candidate.FinishReason): + Optional. 
Output only. The reason why the + model stopped generating tokens. + If empty, the model has not stopped generating + the tokens. + safety_ratings (MutableSequence[google.ai.generativelanguage_v1.types.SafetyRating]): + List of ratings for the safety of a response + candidate. + There is at most one rating per category. + citation_metadata (google.ai.generativelanguage_v1.types.CitationMetadata): + Output only. Citation information for model-generated + candidate. + + This field may be populated with recitation information for + any text included in the ``content``. These are passages + that are "recited" from copyrighted material in the + foundational LLM's training data. + token_count (int): + Output only. Token count for this candidate. + """ + + class FinishReason(proto.Enum): + r"""Defines the reason why the model stopped generating tokens. + + Values: + FINISH_REASON_UNSPECIFIED (0): + Default value. This value is unused. + STOP (1): + Natural stop point of the model or provided + stop sequence. + MAX_TOKENS (2): + The maximum number of tokens as specified in + the request was reached. + SAFETY (3): + The candidate content was flagged for safety + reasons. + RECITATION (4): + The candidate content was flagged for + recitation reasons. + OTHER (5): + Unknown reason. + """ + FINISH_REASON_UNSPECIFIED = 0 + STOP = 1 + MAX_TOKENS = 2 + SAFETY = 3 + RECITATION = 4 + OTHER = 5 + + index: int = proto.Field( + proto.INT32, + number=3, + optional=True, + ) + content: gag_content.Content = proto.Field( + proto.MESSAGE, + number=1, + message=gag_content.Content, + ) + finish_reason: FinishReason = proto.Field( + proto.ENUM, + number=2, + enum=FinishReason, + ) + safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=safety.SafetyRating, + ) + citation_metadata: citation.CitationMetadata = proto.Field( + proto.MESSAGE, + number=6, + message=citation.CitationMetadata, + ) + token_count: int = proto.Field( + proto.INT32, + number=7, + ) + + +class EmbedContentRequest(proto.Message): + r"""Request containing the ``Content`` for the model to embed. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + content (google.ai.generativelanguage_v1.types.Content): + Required. The content to embed. Only the ``parts.text`` + fields will be counted. + task_type (google.ai.generativelanguage_v1.types.TaskType): + Optional. Optional task type for which the embeddings will + be used. Can only be set for ``models/embedding-001``. + + This field is a member of `oneof`_ ``_task_type``. + title (str): + Optional. An optional title for the text. Only applicable + when TaskType is ``RETRIEVAL_DOCUMENT``. + + This field is a member of `oneof`_ ``_title``. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + content: gag_content.Content = proto.Field( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + task_type: "TaskType" = proto.Field( + proto.ENUM, + number=3, + optional=True, + enum="TaskType", + ) + title: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class ContentEmbedding(proto.Message): + r"""A list of floats representing an embedding. + + Attributes: + values (MutableSequence[float]): + The embedding values. 
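[Editor's note] Tying the embedding messages together: an ``EmbedContentRequest`` targets ``models/embedding-001`` (per the ``task_type`` note above) and wraps the text in a ``Content``. A construction sketch; actually sending it goes through the generative service client, which is outside this diff:

from google.ai.generativelanguage_v1.types import (
    Content,
    EmbedContentRequest,
    Part,
    TaskType,
)

request = EmbedContentRequest(
    model="models/embedding-001",
    content=Content(parts=[Part(text="What is the airspeed velocity of an unladen swallow?")]),
    task_type=TaskType.RETRIEVAL_QUERY,
    # `title` is only meaningful when task_type is RETRIEVAL_DOCUMENT, so it is left unset.
)
print(request.model, request.task_type)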
+ """ + + values: MutableSequence[float] = proto.RepeatedField( + proto.FLOAT, + number=1, + ) + + +class EmbedContentResponse(proto.Message): + r"""The response to an ``EmbedContentRequest``. + + Attributes: + embedding (google.ai.generativelanguage_v1.types.ContentEmbedding): + Output only. The embedding generated from the + input content. + """ + + embedding: "ContentEmbedding" = proto.Field( + proto.MESSAGE, + number=1, + message="ContentEmbedding", + ) + + +class BatchEmbedContentsRequest(proto.Message): + r"""Batch request to get embeddings from the model for a list of + prompts. + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + requests (MutableSequence[google.ai.generativelanguage_v1.types.EmbedContentRequest]): + Required. Embed requests for the batch. The model in each of + these requests must match the model specified + ``BatchEmbedContentsRequest.model``. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + requests: MutableSequence["EmbedContentRequest"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="EmbedContentRequest", + ) + + +class BatchEmbedContentsResponse(proto.Message): + r"""The response to a ``BatchEmbedContentsRequest``. + + Attributes: + embeddings (MutableSequence[google.ai.generativelanguage_v1.types.ContentEmbedding]): + Output only. The embeddings for each request, + in the same order as provided in the batch + request. + """ + + embeddings: MutableSequence["ContentEmbedding"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ContentEmbedding", + ) + + +class CountTokensRequest(proto.Message): + r"""Counts the number of tokens in the ``prompt`` sent to a model. + + Models may tokenize text differently, so each model may return a + different ``token_count``. + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]): + Required. The input given to the model as a + prompt. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + contents: MutableSequence[gag_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + + +class CountTokensResponse(proto.Message): + r"""A response from ``CountTokens``. + + It returns the model's ``token_count`` for the ``prompt``. + + Attributes: + total_tokens (int): + The number of tokens that the ``model`` tokenizes the + ``prompt`` into. + + Always non-negative. + """ + + total_tokens: int = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model.py new file mode 100644 index 000000000000..f4d6dad89d1c --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1", + manifest={ + "Model", + }, +) + + +class Model(proto.Message): + r"""Information about a Generative Language Model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. The resource name of the ``Model``. + + Format: ``models/{model}`` with a ``{model}`` naming + convention of: + + - "{base_model_id}-{version}" + + Examples: + + - ``models/chat-bison-001`` + base_model_id (str): + Required. The name of the base model, pass this to the + generation request. + + Examples: + + - ``chat-bison`` + version (str): + Required. The version number of the model. + + This represents the major version + display_name (str): + The human-readable name of the model. E.g. + "Chat Bison". + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + description (str): + A short description of the model. + input_token_limit (int): + Maximum number of input tokens allowed for + this model. + output_token_limit (int): + Maximum number of output tokens available for + this model. + supported_generation_methods (MutableSequence[str]): + The model's supported generation methods. + + The method names are defined as Pascal case strings, such as + ``generateMessage`` which correspond to API methods. + temperature (float): + Controls the randomness of the output. + + Values can range over ``[0.0,1.0]``, inclusive. A value + closer to ``1.0`` will produce responses that are more + varied, while a value closer to ``0.0`` will typically + result in less surprising responses from the model. This + value specifies default to be used by the backend while + making the call to the model. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + For Nucleus sampling. + + Nucleus sampling considers the smallest set of tokens whose + probability sum is at least ``top_p``. This value specifies + default to be used by the backend while making the call to + the model. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + For Top-k sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. This value specifies default to be used by the + backend while making the call to the model. + + This field is a member of `oneof`_ ``_top_k``. 
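[Editor's note] The ``Model`` resource documented above (field definitions follow) is what ``GetModel`` and ``ListModels`` return, so client code typically inspects it to pick token limits and supported methods. A short sketch, assuming the flattened ``name`` argument generated for ``get_model``; the model name and the method string checked are illustrative:

from google.ai.generativelanguage_v1 import ModelServiceClient

client = ModelServiceClient(transport="rest")
model = client.get_model(name="models/chat-bison-001")

if "generateMessage" in model.supported_generation_methods:
    print(model.display_name, model.input_token_limit, model.output_token_limit)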
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + base_model_id: str = proto.Field( + proto.STRING, + number=2, + ) + version: str = proto.Field( + proto.STRING, + number=3, + ) + display_name: str = proto.Field( + proto.STRING, + number=4, + ) + description: str = proto.Field( + proto.STRING, + number=5, + ) + input_token_limit: int = proto.Field( + proto.INT32, + number=6, + ) + output_token_limit: int = proto.Field( + proto.INT32, + number=7, + ) + supported_generation_methods: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=8, + ) + temperature: float = proto.Field( + proto.FLOAT, + number=9, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=10, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=11, + optional=True, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model_service.py new file mode 100644 index 000000000000..7494a8d2a179 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/model_service.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.ai.generativelanguage_v1.types import model + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1", + manifest={ + "GetModelRequest", + "ListModelsRequest", + "ListModelsResponse", + }, +) + + +class GetModelRequest(proto.Message): + r"""Request for getting information about a specific Model. + + Attributes: + name (str): + Required. The resource name of the model. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request for listing all Models. + + Attributes: + page_size (int): + The maximum number of ``Models`` to return (per page). + + The service may return fewer models. If unspecified, at most + 50 models will be returned per page. This method returns at + most 1000 models per page, even if you pass a larger + page_size. + page_token (str): + A page token, received from a previous ``ListModels`` call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListModels`` must match the call that provided the page + token. + """ + + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListModelsResponse(proto.Message): + r"""Response from ``ListModel`` containing a paginated list of Models. 
+ + Attributes: + models (MutableSequence[google.ai.generativelanguage_v1.types.Model]): + The returned Models. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. + + If this field is omitted, there are no more pages. + """ + + @property + def raw_page(self): + return self + + models: MutableSequence[model.Model] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model.Model, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/safety.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/safety.py new file mode 100644 index 000000000000..2548c0f301cb --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/types/safety.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1", + manifest={ + "HarmCategory", + "SafetyRating", + "SafetySetting", + }, +) + + +class HarmCategory(proto.Enum): + r"""The category of a rating. + + These categories cover various kinds of harms that developers + may wish to adjust. + + Values: + HARM_CATEGORY_UNSPECIFIED (0): + Category is unspecified. + HARM_CATEGORY_DEROGATORY (1): + Negative or harmful comments targeting + identity and/or protected attribute. + HARM_CATEGORY_TOXICITY (2): + Content that is rude, disrepspectful, or + profane. + HARM_CATEGORY_VIOLENCE (3): + Describes scenarios depictng violence against + an individual or group, or general descriptions + of gore. + HARM_CATEGORY_SEXUAL (4): + Contains references to sexual acts or other + lewd content. + HARM_CATEGORY_MEDICAL (5): + Promotes unchecked medical advice. + HARM_CATEGORY_DANGEROUS (6): + Dangerous content that promotes, facilitates, + or encourages harmful acts. + HARM_CATEGORY_HARASSMENT (7): + Harasment content. + HARM_CATEGORY_HATE_SPEECH (8): + Hate speech and content. + HARM_CATEGORY_SEXUALLY_EXPLICIT (9): + Sexually explicit content. + HARM_CATEGORY_DANGEROUS_CONTENT (10): + Dangerous content. + """ + HARM_CATEGORY_UNSPECIFIED = 0 + HARM_CATEGORY_DEROGATORY = 1 + HARM_CATEGORY_TOXICITY = 2 + HARM_CATEGORY_VIOLENCE = 3 + HARM_CATEGORY_SEXUAL = 4 + HARM_CATEGORY_MEDICAL = 5 + HARM_CATEGORY_DANGEROUS = 6 + HARM_CATEGORY_HARASSMENT = 7 + HARM_CATEGORY_HATE_SPEECH = 8 + HARM_CATEGORY_SEXUALLY_EXPLICIT = 9 + HARM_CATEGORY_DANGEROUS_CONTENT = 10 + + +class SafetyRating(proto.Message): + r"""Safety rating for a piece of content. + + The safety rating contains the category of harm and the harm + probability level in that category for a piece of content. 
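
Because proto-plus enums subclass ``enum.IntEnum``, the categories above convert freely between names and wire values; a small sketch (top-level exports assumed, as in the v1beta package below):

.. code-block:: python

    from google.ai import generativelanguage_v1 as glm

    category = glm.HarmCategory.HARM_CATEGORY_HARASSMENT
    print(int(category))             # 7
    print(glm.HarmCategory(8).name)  # HARM_CATEGORY_HATE_SPEECH
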
+ Content is classified for safety across a number of harm + categories and the probability of the harm classification is + included here. + + Attributes: + category (google.ai.generativelanguage_v1.types.HarmCategory): + Required. The category for this rating. + probability (google.ai.generativelanguage_v1.types.SafetyRating.HarmProbability): + Required. The probability of harm for this + content. + blocked (bool): + Was this content blocked because of this + rating? + """ + + class HarmProbability(proto.Enum): + r"""The probability that a piece of content is harmful. + + The classification system gives the probability of the content + being unsafe. This does not indicate the severity of harm for a + piece of content. + + Values: + HARM_PROBABILITY_UNSPECIFIED (0): + Probability is unspecified. + NEGLIGIBLE (1): + Content has a negligible chance of being + unsafe. + LOW (2): + Content has a low chance of being unsafe. + MEDIUM (3): + Content has a medium chance of being unsafe. + HIGH (4): + Content has a high chance of being unsafe. + """ + HARM_PROBABILITY_UNSPECIFIED = 0 + NEGLIGIBLE = 1 + LOW = 2 + MEDIUM = 3 + HIGH = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=3, + enum="HarmCategory", + ) + probability: HarmProbability = proto.Field( + proto.ENUM, + number=4, + enum=HarmProbability, + ) + blocked: bool = proto.Field( + proto.BOOL, + number=5, + ) + + +class SafetySetting(proto.Message): + r"""Safety setting, affecting the safety-blocking behavior. + + Passing a safety setting for a category changes the allowed + proability that content is blocked. + + Attributes: + category (google.ai.generativelanguage_v1.types.HarmCategory): + Required. The category for this setting. + threshold (google.ai.generativelanguage_v1.types.SafetySetting.HarmBlockThreshold): + Required. Controls the probability threshold + at which harm is blocked. + """ + + class HarmBlockThreshold(proto.Enum): + r"""Block at and beyond a specified harm probability. + + Values: + HARM_BLOCK_THRESHOLD_UNSPECIFIED (0): + Threshold is unspecified. + BLOCK_LOW_AND_ABOVE (1): + Content with NEGLIGIBLE will be allowed. + BLOCK_MEDIUM_AND_ABOVE (2): + Content with NEGLIGIBLE and LOW will be + allowed. + BLOCK_ONLY_HIGH (3): + Content with NEGLIGIBLE, LOW, and MEDIUM will + be allowed. + BLOCK_NONE (4): + All content will be allowed. + """ + HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 1 + BLOCK_MEDIUM_AND_ABOVE = 2 + BLOCK_ONLY_HIGH = 3 + BLOCK_NONE = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=3, + enum="HarmCategory", + ) + threshold: HarmBlockThreshold = proto.Field( + proto.ENUM, + number=4, + enum=HarmBlockThreshold, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/__init__.py new file mode 100644 index 000000000000..77571d3938df --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/__init__.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
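
The two enums work together as described above: a ``SafetySetting`` picks a block threshold per category, and the service reports a ``SafetyRating`` probability per category. A hedged sketch of constructing both locally (top-level exports assumed):

.. code-block:: python

    from google.ai import generativelanguage_v1 as glm

    setting = glm.SafetySetting(
        category=glm.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold=glm.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    )

    rating = glm.SafetyRating(
        category=glm.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        probability=glm.SafetyRating.HarmProbability.LOW,
    )
    # Under BLOCK_MEDIUM_AND_ABOVE, a LOW rating would not be blocked.
    print(rating.blocked)  # False unless the service marked it blocked
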
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.discuss_service import DiscussServiceAsyncClient, DiscussServiceClient +from .services.generative_service import ( + GenerativeServiceAsyncClient, + GenerativeServiceClient, +) +from .services.model_service import ModelServiceAsyncClient, ModelServiceClient +from .services.permission_service import ( + PermissionServiceAsyncClient, + PermissionServiceClient, +) +from .services.retriever_service import ( + RetrieverServiceAsyncClient, + RetrieverServiceClient, +) +from .services.text_service import TextServiceAsyncClient, TextServiceClient +from .types.citation import CitationMetadata, CitationSource +from .types.content import ( + Blob, + Content, + FunctionCall, + FunctionDeclaration, + FunctionResponse, + GroundingPassage, + GroundingPassages, + Part, + Schema, + Tool, + Type, +) +from .types.discuss_service import ( + CountMessageTokensRequest, + CountMessageTokensResponse, + Example, + GenerateMessageRequest, + GenerateMessageResponse, + Message, + MessagePrompt, +) +from .types.generative_service import ( + AttributionSourceId, + BatchEmbedContentsRequest, + BatchEmbedContentsResponse, + Candidate, + ContentEmbedding, + CountTokensRequest, + CountTokensResponse, + EmbedContentRequest, + EmbedContentResponse, + GenerateAnswerRequest, + GenerateAnswerResponse, + GenerateContentRequest, + GenerateContentResponse, + GenerationConfig, + GroundingAttribution, + SemanticRetrieverConfig, + TaskType, +) +from .types.model import Model +from .types.model_service import ( + CreateTunedModelMetadata, + CreateTunedModelRequest, + DeleteTunedModelRequest, + GetModelRequest, + GetTunedModelRequest, + ListModelsRequest, + ListModelsResponse, + ListTunedModelsRequest, + ListTunedModelsResponse, + UpdateTunedModelRequest, +) +from .types.permission import Permission +from .types.permission_service import ( + CreatePermissionRequest, + DeletePermissionRequest, + GetPermissionRequest, + ListPermissionsRequest, + ListPermissionsResponse, + TransferOwnershipRequest, + TransferOwnershipResponse, + UpdatePermissionRequest, +) +from .types.retriever import ( + Chunk, + ChunkData, + Condition, + Corpus, + CustomMetadata, + Document, + MetadataFilter, + StringList, +) +from .types.retriever_service import ( + BatchCreateChunksRequest, + BatchCreateChunksResponse, + BatchDeleteChunksRequest, + BatchUpdateChunksRequest, + BatchUpdateChunksResponse, + CreateChunkRequest, + CreateCorpusRequest, + CreateDocumentRequest, + DeleteChunkRequest, + DeleteCorpusRequest, + DeleteDocumentRequest, + GetChunkRequest, + GetCorpusRequest, + GetDocumentRequest, + ListChunksRequest, + ListChunksResponse, + ListCorporaRequest, + ListCorporaResponse, + ListDocumentsRequest, + ListDocumentsResponse, + QueryCorpusRequest, + QueryCorpusResponse, + QueryDocumentRequest, + QueryDocumentResponse, + RelevantChunk, + UpdateChunkRequest, + UpdateCorpusRequest, + UpdateDocumentRequest, +) +from .types.safety import ( + ContentFilter, + HarmCategory, + SafetyFeedback, + SafetyRating, + 
SafetySetting, +) +from .types.text_service import ( + BatchEmbedTextRequest, + BatchEmbedTextResponse, + CountTextTokensRequest, + CountTextTokensResponse, + Embedding, + EmbedTextRequest, + EmbedTextResponse, + GenerateTextRequest, + GenerateTextResponse, + TextCompletion, + TextPrompt, +) +from .types.tuned_model import ( + Dataset, + Hyperparameters, + TunedModel, + TunedModelSource, + TuningExample, + TuningExamples, + TuningSnapshot, + TuningTask, +) + +__all__ = ( + "DiscussServiceAsyncClient", + "GenerativeServiceAsyncClient", + "ModelServiceAsyncClient", + "PermissionServiceAsyncClient", + "RetrieverServiceAsyncClient", + "TextServiceAsyncClient", + "AttributionSourceId", + "BatchCreateChunksRequest", + "BatchCreateChunksResponse", + "BatchDeleteChunksRequest", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "BatchEmbedTextRequest", + "BatchEmbedTextResponse", + "BatchUpdateChunksRequest", + "BatchUpdateChunksResponse", + "Blob", + "Candidate", + "Chunk", + "ChunkData", + "CitationMetadata", + "CitationSource", + "Condition", + "Content", + "ContentEmbedding", + "ContentFilter", + "Corpus", + "CountMessageTokensRequest", + "CountMessageTokensResponse", + "CountTextTokensRequest", + "CountTextTokensResponse", + "CountTokensRequest", + "CountTokensResponse", + "CreateChunkRequest", + "CreateCorpusRequest", + "CreateDocumentRequest", + "CreatePermissionRequest", + "CreateTunedModelMetadata", + "CreateTunedModelRequest", + "CustomMetadata", + "Dataset", + "DeleteChunkRequest", + "DeleteCorpusRequest", + "DeleteDocumentRequest", + "DeletePermissionRequest", + "DeleteTunedModelRequest", + "DiscussServiceClient", + "Document", + "EmbedContentRequest", + "EmbedContentResponse", + "EmbedTextRequest", + "EmbedTextResponse", + "Embedding", + "Example", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", + "GenerateAnswerRequest", + "GenerateAnswerResponse", + "GenerateContentRequest", + "GenerateContentResponse", + "GenerateMessageRequest", + "GenerateMessageResponse", + "GenerateTextRequest", + "GenerateTextResponse", + "GenerationConfig", + "GenerativeServiceClient", + "GetChunkRequest", + "GetCorpusRequest", + "GetDocumentRequest", + "GetModelRequest", + "GetPermissionRequest", + "GetTunedModelRequest", + "GroundingAttribution", + "GroundingPassage", + "GroundingPassages", + "HarmCategory", + "Hyperparameters", + "ListChunksRequest", + "ListChunksResponse", + "ListCorporaRequest", + "ListCorporaResponse", + "ListDocumentsRequest", + "ListDocumentsResponse", + "ListModelsRequest", + "ListModelsResponse", + "ListPermissionsRequest", + "ListPermissionsResponse", + "ListTunedModelsRequest", + "ListTunedModelsResponse", + "Message", + "MessagePrompt", + "MetadataFilter", + "Model", + "ModelServiceClient", + "Part", + "Permission", + "PermissionServiceClient", + "QueryCorpusRequest", + "QueryCorpusResponse", + "QueryDocumentRequest", + "QueryDocumentResponse", + "RelevantChunk", + "RetrieverServiceClient", + "SafetyFeedback", + "SafetyRating", + "SafetySetting", + "Schema", + "SemanticRetrieverConfig", + "StringList", + "TaskType", + "TextCompletion", + "TextPrompt", + "TextServiceClient", + "Tool", + "TransferOwnershipRequest", + "TransferOwnershipResponse", + "TunedModel", + "TunedModelSource", + "TuningExample", + "TuningExamples", + "TuningSnapshot", + "TuningTask", + "Type", + "UpdateChunkRequest", + "UpdateCorpusRequest", + "UpdateDocumentRequest", + "UpdatePermissionRequest", + "UpdateTunedModelRequest", +) diff --git 
a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_metadata.json b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_metadata.json new file mode 100644 index 000000000000..c7fd001bd974 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_metadata.json @@ -0,0 +1,798 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.ai.generativelanguage_v1beta", + "protoPackage": "google.ai.generativelanguage.v1beta", + "schema": "1.0", + "services": { + "DiscussService": { + "clients": { + "grpc": { + "libraryClient": "DiscussServiceClient", + "rpcs": { + "CountMessageTokens": { + "methods": [ + "count_message_tokens" + ] + }, + "GenerateMessage": { + "methods": [ + "generate_message" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DiscussServiceAsyncClient", + "rpcs": { + "CountMessageTokens": { + "methods": [ + "count_message_tokens" + ] + }, + "GenerateMessage": { + "methods": [ + "generate_message" + ] + } + } + }, + "rest": { + "libraryClient": "DiscussServiceClient", + "rpcs": { + "CountMessageTokens": { + "methods": [ + "count_message_tokens" + ] + }, + "GenerateMessage": { + "methods": [ + "generate_message" + ] + } + } + } + } + }, + "GenerativeService": { + "clients": { + "grpc": { + "libraryClient": "GenerativeServiceClient", + "rpcs": { + "BatchEmbedContents": { + "methods": [ + "batch_embed_contents" + ] + }, + "CountTokens": { + "methods": [ + "count_tokens" + ] + }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, + "GenerateAnswer": { + "methods": [ + "generate_answer" + ] + }, + "GenerateContent": { + "methods": [ + "generate_content" + ] + }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + } + } + }, + "grpc-async": { + "libraryClient": "GenerativeServiceAsyncClient", + "rpcs": { + "BatchEmbedContents": { + "methods": [ + "batch_embed_contents" + ] + }, + "CountTokens": { + "methods": [ + "count_tokens" + ] + }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, + "GenerateAnswer": { + "methods": [ + "generate_answer" + ] + }, + "GenerateContent": { + "methods": [ + "generate_content" + ] + }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + } + } + }, + "rest": { + "libraryClient": "GenerativeServiceClient", + "rpcs": { + "BatchEmbedContents": { + "methods": [ + "batch_embed_contents" + ] + }, + "CountTokens": { + "methods": [ + "count_tokens" + ] + }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, + "GenerateAnswer": { + "methods": [ + "generate_answer" + ] + }, + "GenerateContent": { + "methods": [ + "generate_content" + ] + }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + } + } + } + } + }, + "ModelService": { + "clients": { + "grpc": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "CreateTunedModel": { + "methods": [ + "create_tuned_model" + ] + }, + "DeleteTunedModel": { + "methods": [ + "delete_tuned_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetTunedModel": { + "methods": [ + "get_tuned_model" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "ListTunedModels": { + "methods": [ + "list_tuned_models" + ] + }, + "UpdateTunedModel": { + "methods": [ + "update_tuned_model" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelServiceAsyncClient", + "rpcs": { + 
"CreateTunedModel": { + "methods": [ + "create_tuned_model" + ] + }, + "DeleteTunedModel": { + "methods": [ + "delete_tuned_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetTunedModel": { + "methods": [ + "get_tuned_model" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "ListTunedModels": { + "methods": [ + "list_tuned_models" + ] + }, + "UpdateTunedModel": { + "methods": [ + "update_tuned_model" + ] + } + } + }, + "rest": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "CreateTunedModel": { + "methods": [ + "create_tuned_model" + ] + }, + "DeleteTunedModel": { + "methods": [ + "delete_tuned_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetTunedModel": { + "methods": [ + "get_tuned_model" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "ListTunedModels": { + "methods": [ + "list_tuned_models" + ] + }, + "UpdateTunedModel": { + "methods": [ + "update_tuned_model" + ] + } + } + } + } + }, + "PermissionService": { + "clients": { + "grpc": { + "libraryClient": "PermissionServiceClient", + "rpcs": { + "CreatePermission": { + "methods": [ + "create_permission" + ] + }, + "DeletePermission": { + "methods": [ + "delete_permission" + ] + }, + "GetPermission": { + "methods": [ + "get_permission" + ] + }, + "ListPermissions": { + "methods": [ + "list_permissions" + ] + }, + "TransferOwnership": { + "methods": [ + "transfer_ownership" + ] + }, + "UpdatePermission": { + "methods": [ + "update_permission" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PermissionServiceAsyncClient", + "rpcs": { + "CreatePermission": { + "methods": [ + "create_permission" + ] + }, + "DeletePermission": { + "methods": [ + "delete_permission" + ] + }, + "GetPermission": { + "methods": [ + "get_permission" + ] + }, + "ListPermissions": { + "methods": [ + "list_permissions" + ] + }, + "TransferOwnership": { + "methods": [ + "transfer_ownership" + ] + }, + "UpdatePermission": { + "methods": [ + "update_permission" + ] + } + } + }, + "rest": { + "libraryClient": "PermissionServiceClient", + "rpcs": { + "CreatePermission": { + "methods": [ + "create_permission" + ] + }, + "DeletePermission": { + "methods": [ + "delete_permission" + ] + }, + "GetPermission": { + "methods": [ + "get_permission" + ] + }, + "ListPermissions": { + "methods": [ + "list_permissions" + ] + }, + "TransferOwnership": { + "methods": [ + "transfer_ownership" + ] + }, + "UpdatePermission": { + "methods": [ + "update_permission" + ] + } + } + } + } + }, + "RetrieverService": { + "clients": { + "grpc": { + "libraryClient": "RetrieverServiceClient", + "rpcs": { + "BatchCreateChunks": { + "methods": [ + "batch_create_chunks" + ] + }, + "BatchDeleteChunks": { + "methods": [ + "batch_delete_chunks" + ] + }, + "BatchUpdateChunks": { + "methods": [ + "batch_update_chunks" + ] + }, + "CreateChunk": { + "methods": [ + "create_chunk" + ] + }, + "CreateCorpus": { + "methods": [ + "create_corpus" + ] + }, + "CreateDocument": { + "methods": [ + "create_document" + ] + }, + "DeleteChunk": { + "methods": [ + "delete_chunk" + ] + }, + "DeleteCorpus": { + "methods": [ + "delete_corpus" + ] + }, + "DeleteDocument": { + "methods": [ + "delete_document" + ] + }, + "GetChunk": { + "methods": [ + "get_chunk" + ] + }, + "GetCorpus": { + "methods": [ + "get_corpus" + ] + }, + "GetDocument": { + "methods": [ + "get_document" + ] + }, + "ListChunks": { + "methods": [ + "list_chunks" + ] + }, + "ListCorpora": { + "methods": [ + "list_corpora" + ] + }, + "ListDocuments": { + 
"methods": [ + "list_documents" + ] + }, + "QueryCorpus": { + "methods": [ + "query_corpus" + ] + }, + "QueryDocument": { + "methods": [ + "query_document" + ] + }, + "UpdateChunk": { + "methods": [ + "update_chunk" + ] + }, + "UpdateCorpus": { + "methods": [ + "update_corpus" + ] + }, + "UpdateDocument": { + "methods": [ + "update_document" + ] + } + } + }, + "grpc-async": { + "libraryClient": "RetrieverServiceAsyncClient", + "rpcs": { + "BatchCreateChunks": { + "methods": [ + "batch_create_chunks" + ] + }, + "BatchDeleteChunks": { + "methods": [ + "batch_delete_chunks" + ] + }, + "BatchUpdateChunks": { + "methods": [ + "batch_update_chunks" + ] + }, + "CreateChunk": { + "methods": [ + "create_chunk" + ] + }, + "CreateCorpus": { + "methods": [ + "create_corpus" + ] + }, + "CreateDocument": { + "methods": [ + "create_document" + ] + }, + "DeleteChunk": { + "methods": [ + "delete_chunk" + ] + }, + "DeleteCorpus": { + "methods": [ + "delete_corpus" + ] + }, + "DeleteDocument": { + "methods": [ + "delete_document" + ] + }, + "GetChunk": { + "methods": [ + "get_chunk" + ] + }, + "GetCorpus": { + "methods": [ + "get_corpus" + ] + }, + "GetDocument": { + "methods": [ + "get_document" + ] + }, + "ListChunks": { + "methods": [ + "list_chunks" + ] + }, + "ListCorpora": { + "methods": [ + "list_corpora" + ] + }, + "ListDocuments": { + "methods": [ + "list_documents" + ] + }, + "QueryCorpus": { + "methods": [ + "query_corpus" + ] + }, + "QueryDocument": { + "methods": [ + "query_document" + ] + }, + "UpdateChunk": { + "methods": [ + "update_chunk" + ] + }, + "UpdateCorpus": { + "methods": [ + "update_corpus" + ] + }, + "UpdateDocument": { + "methods": [ + "update_document" + ] + } + } + }, + "rest": { + "libraryClient": "RetrieverServiceClient", + "rpcs": { + "BatchCreateChunks": { + "methods": [ + "batch_create_chunks" + ] + }, + "BatchDeleteChunks": { + "methods": [ + "batch_delete_chunks" + ] + }, + "BatchUpdateChunks": { + "methods": [ + "batch_update_chunks" + ] + }, + "CreateChunk": { + "methods": [ + "create_chunk" + ] + }, + "CreateCorpus": { + "methods": [ + "create_corpus" + ] + }, + "CreateDocument": { + "methods": [ + "create_document" + ] + }, + "DeleteChunk": { + "methods": [ + "delete_chunk" + ] + }, + "DeleteCorpus": { + "methods": [ + "delete_corpus" + ] + }, + "DeleteDocument": { + "methods": [ + "delete_document" + ] + }, + "GetChunk": { + "methods": [ + "get_chunk" + ] + }, + "GetCorpus": { + "methods": [ + "get_corpus" + ] + }, + "GetDocument": { + "methods": [ + "get_document" + ] + }, + "ListChunks": { + "methods": [ + "list_chunks" + ] + }, + "ListCorpora": { + "methods": [ + "list_corpora" + ] + }, + "ListDocuments": { + "methods": [ + "list_documents" + ] + }, + "QueryCorpus": { + "methods": [ + "query_corpus" + ] + }, + "QueryDocument": { + "methods": [ + "query_document" + ] + }, + "UpdateChunk": { + "methods": [ + "update_chunk" + ] + }, + "UpdateCorpus": { + "methods": [ + "update_corpus" + ] + }, + "UpdateDocument": { + "methods": [ + "update_document" + ] + } + } + } + } + }, + "TextService": { + "clients": { + "grpc": { + "libraryClient": "TextServiceClient", + "rpcs": { + "BatchEmbedText": { + "methods": [ + "batch_embed_text" + ] + }, + "CountTextTokens": { + "methods": [ + "count_text_tokens" + ] + }, + "EmbedText": { + "methods": [ + "embed_text" + ] + }, + "GenerateText": { + "methods": [ + "generate_text" + ] + } + } + }, + "grpc-async": { + "libraryClient": "TextServiceAsyncClient", + "rpcs": { + "BatchEmbedText": { + "methods": [ + "batch_embed_text" + ] + }, 
+ "CountTextTokens": { + "methods": [ + "count_text_tokens" + ] + }, + "EmbedText": { + "methods": [ + "embed_text" + ] + }, + "GenerateText": { + "methods": [ + "generate_text" + ] + } + } + }, + "rest": { + "libraryClient": "TextServiceClient", + "rpcs": { + "BatchEmbedText": { + "methods": [ + "batch_embed_text" + ] + }, + "CountTextTokens": { + "methods": [ + "count_text_tokens" + ] + }, + "EmbedText": { + "methods": [ + "embed_text" + ] + }, + "GenerateText": { + "methods": [ + "generate_text" + ] + } + } + } + } + } + } +} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py new file mode 100644 index 000000000000..360a0d13ebdd --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/py.typed b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/py.typed new file mode 100644 index 000000000000..38773eee6363 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-ai-generativelanguage package uses inline types. diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
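
The metadata file above is plain JSON, so tooling can map an RPC name to the generated client method without importing the library; a small sketch (the local path is hypothetical):

.. code-block:: python

    import json
    from pathlib import Path

    # Hypothetical checkout-relative path to the file added above.
    path = Path("google/ai/generativelanguage_v1beta/gapic_metadata.json")
    meta = json.loads(path.read_text())

    rpcs = meta["services"]["GenerativeService"]["clients"]["grpc"]["rpcs"]
    print(rpcs["GenerateContent"]["methods"])  # ['generate_content']
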
+# diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/__init__.py new file mode 100644 index 000000000000..2247026798d5 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import DiscussServiceAsyncClient +from .client import DiscussServiceClient + +__all__ = ( + "DiscussServiceClient", + "DiscussServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/async_client.py new file mode 100644 index 000000000000..29b97ea0607f --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/async_client.py @@ -0,0 +1,564 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import discuss_service, safety + +from .client import DiscussServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, DiscussServiceTransport +from .transports.grpc_asyncio import DiscussServiceGrpcAsyncIOTransport + + +class DiscussServiceAsyncClient: + """An API for using Generative Language Models (GLMs) in dialog + applications. 
+ Also known as large language models (LLMs), this API provides + models that are trained for multi-turn dialog. + """ + + _client: DiscussServiceClient + + DEFAULT_ENDPOINT = DiscussServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DiscussServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(DiscussServiceClient.model_path) + parse_model_path = staticmethod(DiscussServiceClient.parse_model_path) + common_billing_account_path = staticmethod( + DiscussServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + DiscussServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(DiscussServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + DiscussServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + DiscussServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + DiscussServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(DiscussServiceClient.common_project_path) + parse_common_project_path = staticmethod( + DiscussServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(DiscussServiceClient.common_location_path) + parse_common_location_path = staticmethod( + DiscussServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DiscussServiceAsyncClient: The constructed client. + """ + return DiscussServiceClient.from_service_account_info.__func__(DiscussServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DiscussServiceAsyncClient: The constructed client. + """ + return DiscussServiceClient.from_service_account_file.__func__(DiscussServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. 
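
A short sketch of the service-account helpers defined above; the key-file path is a placeholder:

.. code-block:: python

    from google.ai import generativelanguage_v1beta as glm

    # from_service_account_json is an alias for from_service_account_file.
    client = glm.DiscussServiceAsyncClient.from_service_account_file(
        "service-account.json"  # placeholder path to a key file
    )
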
+ + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return DiscussServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> DiscussServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DiscussServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(DiscussServiceClient).get_transport_class, type(DiscussServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, DiscussServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the discuss service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.DiscussServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
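
Per the ``client_options`` behavior described above, the API endpoint can be overridden explicitly at construction time; a hedged example (credentials are still resolved from the environment):

.. code-block:: python

    from google.api_core.client_options import ClientOptions

    from google.ai import generativelanguage_v1beta as glm

    client = glm.DiscussServiceAsyncClient(
        client_options=ClientOptions(
            api_endpoint="generativelanguage.googleapis.com"  # the default endpoint
        )
    )
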
+ """ + self._client = DiscussServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def generate_message( + self, + request: Optional[Union[discuss_service.GenerateMessageRequest, dict]] = None, + *, + model: Optional[str] = None, + prompt: Optional[discuss_service.MessagePrompt] = None, + temperature: Optional[float] = None, + candidate_count: Optional[int] = None, + top_p: Optional[float] = None, + top_k: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> discuss_service.GenerateMessageResponse: + r"""Generates a response from the model given an input + ``MessagePrompt``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_generate_message(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.GenerateMessageRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.generate_message(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GenerateMessageRequest, dict]]): + The request object. Request to generate a message + response from the model. + model (:class:`str`): + Required. The name of the model to use. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (:class:`google.ai.generativelanguage_v1beta.types.MessagePrompt`): + Required. The structured textual + input given to the model as a prompt. + Given a + prompt, the model will return what it + predicts is the next message in the + discussion. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + temperature (:class:`float`): + Optional. Controls the randomness of the output. + + Values can range over ``[0.0,1.0]``, inclusive. A value + closer to ``1.0`` will produce responses that are more + varied, while a value closer to ``0.0`` will typically + result in less surprising responses from the model. + + This corresponds to the ``temperature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + candidate_count (:class:`int`): + Optional. The number of generated response messages to + return. + + This value must be between ``[1, 8]``, inclusive. If + unset, this will default to ``1``. + + This corresponds to the ``candidate_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_p (:class:`float`): + Optional. The maximum cumulative probability of tokens + to consider when sampling. + + The model uses combined Top-k and nucleus sampling. 
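
Besides the request-object form shown in the sample above, the flattened parameters documented here can be passed directly (mixing them with ``request`` raises ``ValueError``); a rough sketch using the example model id, with ``Message``/``MessagePrompt`` from this package:

.. code-block:: python

    from google.ai import generativelanguage_v1beta as glm

    async def sample_flattened_call(client: glm.DiscussServiceAsyncClient):
        prompt = glm.MessagePrompt(messages=[glm.Message(content="Hello")])
        # Passing `request` together with any of these would raise ValueError.
        return await client.generate_message(
            model="models/chat-bison-001",
            prompt=prompt,
            temperature=0.5,
            candidate_count=1,
        )
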
+ + Nucleus sampling considers the smallest set of tokens + whose probability sum is at least ``top_p``. + + This corresponds to the ``top_p`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_k (:class:`int`): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most + probable tokens. + + This corresponds to the ``top_k`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateMessageResponse: + The response from the model. + + This includes candidate messages and + conversation history in the form of + chronologically-ordered messages. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [model, prompt, temperature, candidate_count, top_p, top_k] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = discuss_service.GenerateMessageRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + if temperature is not None: + request.temperature = temperature + if candidate_count is not None: + request.candidate_count = candidate_count + if top_p is not None: + request.top_p = top_p + if top_k is not None: + request.top_k = top_k + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_message, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def count_message_tokens( + self, + request: Optional[ + Union[discuss_service.CountMessageTokensRequest, dict] + ] = None, + *, + model: Optional[str] = None, + prompt: Optional[discuss_service.MessagePrompt] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> discuss_service.CountMessageTokensResponse: + r"""Runs a model's tokenizer on a string and returns the + token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_count_message_tokens(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.CountMessageTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.count_message_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CountMessageTokensRequest, dict]]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (:class:`google.ai.generativelanguage_v1beta.types.MessagePrompt`): + Required. The prompt, whose token + count is to be returned. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.CountMessageTokensResponse: + A response from CountMessageTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, prompt]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = discuss_service.CountMessageTokensRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.count_message_tokens, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "DiscussServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("DiscussServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/client.py new file mode 100644 index 000000000000..e99768784230 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/client.py @@ -0,0 +1,775 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import discuss_service, safety + +from .transports.base import DEFAULT_CLIENT_INFO, DiscussServiceTransport +from .transports.grpc import DiscussServiceGrpcTransport +from .transports.grpc_asyncio import DiscussServiceGrpcAsyncIOTransport +from .transports.rest import DiscussServiceRestTransport + + +class DiscussServiceClientMeta(type): + """Metaclass for the DiscussService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
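
As defined by ``__aenter__``/``__aexit__`` above, the async client can be used as an async context manager so its transport is closed deterministically; a small sketch (assumes credentials are available in the environment):

.. code-block:: python

    import asyncio

    from google.ai import generativelanguage_v1beta as glm

    async def main():
        async with glm.DiscussServiceAsyncClient() as client:
            # Use the client here; transport.close() runs on exit.
            print(type(client.transport).__name__)

    asyncio.run(main())
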
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[DiscussServiceTransport]] + _transport_registry["grpc"] = DiscussServiceGrpcTransport + _transport_registry["grpc_asyncio"] = DiscussServiceGrpcAsyncIOTransport + _transport_registry["rest"] = DiscussServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[DiscussServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class DiscussServiceClient(metaclass=DiscussServiceClientMeta): + """An API for using Generative Language Models (GLMs) in dialog + applications. + Also known as large language models (LLMs), this API provides + models that are trained for multi-turn dialog. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "generativelanguage.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DiscussServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DiscussServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DiscussServiceTransport: + """Returns the transport used by the client instance. 
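
Given the transport registry above, the transport can be selected by label when the client is constructed; a hedged sketch (assumes credentials are configured):

.. code-block:: python

    from google.ai import generativelanguage_v1beta as glm

    # "grpc" (the default), "grpc_asyncio" and "rest" are the registered labels.
    rest_client = glm.DiscussServiceClient(transport="rest")
    print(type(rest_client.transport).__name__)  # DiscussServiceRestTransport
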
+ + Returns: + DiscussServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def model_path( + model: str, + ) -> str: + """Returns a fully-qualified model string.""" + return "models/{model}".format( + model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match(r"^models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[Union[str, DiscussServiceTransport]] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the discuss service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, DiscussServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DiscussServiceTransport): + # transport is a DiscussServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def generate_message( + self, + request: Optional[Union[discuss_service.GenerateMessageRequest, dict]] = None, + *, + model: Optional[str] = None, + prompt: Optional[discuss_service.MessagePrompt] = None, + temperature: Optional[float] = None, + candidate_count: Optional[int] = None, + top_p: Optional[float] = None, + top_k: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> discuss_service.GenerateMessageResponse: + r"""Generates a response from the model given an input + ``MessagePrompt``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_generate_message(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.GenerateMessageRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.generate_message(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GenerateMessageRequest, dict]): + The request object. Request to generate a message + response from the model. + model (str): + Required. The name of the model to use. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (google.ai.generativelanguage_v1beta.types.MessagePrompt): + Required. The structured textual + input given to the model as a prompt. + Given a + prompt, the model will return what it + predicts is the next message in the + discussion. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + temperature (float): + Optional. Controls the randomness of the output. + + Values can range over ``[0.0,1.0]``, inclusive. A value + closer to ``1.0`` will produce responses that are more + varied, while a value closer to ``0.0`` will typically + result in less surprising responses from the model. + + This corresponds to the ``temperature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + candidate_count (int): + Optional. The number of generated response messages to + return. + + This value must be between ``[1, 8]``, inclusive. If + unset, this will default to ``1``. + + This corresponds to the ``candidate_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_p (float): + Optional. The maximum cumulative probability of tokens + to consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Nucleus sampling considers the smallest set of tokens + whose probability sum is at least ``top_p``. + + This corresponds to the ``top_p`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_k (int): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most + probable tokens. + + This corresponds to the ``top_k`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateMessageResponse: + The response from the model. + + This includes candidate messages and + conversation history in the form of + chronologically-ordered messages. + + """ + # Create or coerce a protobuf request object. 
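+ # The flattened keyword arguments (model, prompt, temperature, and so on)
+ # are a convenience alternative to building the request object by hand;
+ # the check below rejects calls that mix the two styles. For example
+ # (illustrative values only, not part of the generated sample):
+ #     response = client.generate_message(
+ #         model="models/chat-bison-001", prompt=prompt, temperature=0.5
+ #     )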
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [model, prompt, temperature, candidate_count, top_p, top_k] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a discuss_service.GenerateMessageRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, discuss_service.GenerateMessageRequest): + request = discuss_service.GenerateMessageRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + if temperature is not None: + request.temperature = temperature + if candidate_count is not None: + request.candidate_count = candidate_count + if top_p is not None: + request.top_p = top_p + if top_k is not None: + request.top_k = top_k + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_message] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def count_message_tokens( + self, + request: Optional[ + Union[discuss_service.CountMessageTokensRequest, dict] + ] = None, + *, + model: Optional[str] = None, + prompt: Optional[discuss_service.MessagePrompt] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> discuss_service.CountMessageTokensResponse: + r"""Runs a model's tokenizer on a string and returns the + token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_count_message_tokens(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.CountMessageTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.count_message_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CountMessageTokensRequest, dict]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (str): + Required. The model's resource name. 
This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (google.ai.generativelanguage_v1beta.types.MessagePrompt): + Required. The prompt, whose token + count is to be returned. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.CountMessageTokensResponse: + A response from CountMessageTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, prompt]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a discuss_service.CountMessageTokensRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, discuss_service.CountMessageTokensRequest): + request = discuss_service.CountMessageTokensRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.count_message_tokens] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "DiscussServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("DiscussServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__init__.py new file mode 100644 index 000000000000..209ce4db6d6e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DiscussServiceTransport +from .grpc import DiscussServiceGrpcTransport +from .grpc_asyncio import DiscussServiceGrpcAsyncIOTransport +from .rest import DiscussServiceRestInterceptor, DiscussServiceRestTransport + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[DiscussServiceTransport]] +_transport_registry["grpc"] = DiscussServiceGrpcTransport +_transport_registry["grpc_asyncio"] = DiscussServiceGrpcAsyncIOTransport +_transport_registry["rest"] = DiscussServiceRestTransport + +__all__ = ( + "DiscussServiceTransport", + "DiscussServiceGrpcTransport", + "DiscussServiceGrpcAsyncIOTransport", + "DiscussServiceRestTransport", + "DiscussServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/base.py new file mode 100644 index 000000000000..49d99b8d9187 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/base.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version +from google.ai.generativelanguage_v1beta.types import discuss_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class DiscussServiceTransport(abc.ABC): + """Abstract transport class for DiscussService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
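+ # (With a self-signed JWT the service account signs its own bearer token
+ # for the API audience, avoiding a round trip to the OAuth token endpoint.)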
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.generate_message: gapic_v1.method.wrap_method( + self.generate_message, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.count_message_tokens: gapic_v1.method.wrap_method( + self.count_message_tokens, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def generate_message( + self, + ) -> Callable[ + [discuss_service.GenerateMessageRequest], + Union[ + discuss_service.GenerateMessageResponse, + Awaitable[discuss_service.GenerateMessageResponse], + ], + ]: + raise NotImplementedError() + + @property + def count_message_tokens( + self, + ) -> Callable[ + [discuss_service.CountMessageTokensRequest], + Union[ + discuss_service.CountMessageTokensResponse, + Awaitable[discuss_service.CountMessageTokensResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("DiscussServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc.py new file mode 100644 index 000000000000..7495a7f14ba0 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1beta.types import discuss_service + +from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport + + +class DiscussServiceGrpcTransport(DiscussServiceTransport): + """gRPC backend transport for DiscussService. + + An API for using Generative Language Models (GLMs) in dialog + applications. + Also known as large language models (LLMs), this API provides + models that are trained for multi-turn dialog. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. 
It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def generate_message( + self, + ) -> Callable[ + [discuss_service.GenerateMessageRequest], + discuss_service.GenerateMessageResponse, + ]: + r"""Return a callable for the generate message method over gRPC. + + Generates a response from the model given an input + ``MessagePrompt``. + + Returns: + Callable[[~.GenerateMessageRequest], + ~.GenerateMessageResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_message" not in self._stubs: + self._stubs["generate_message"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.DiscussService/GenerateMessage", + request_serializer=discuss_service.GenerateMessageRequest.serialize, + response_deserializer=discuss_service.GenerateMessageResponse.deserialize, + ) + return self._stubs["generate_message"] + + @property + def count_message_tokens( + self, + ) -> Callable[ + [discuss_service.CountMessageTokensRequest], + discuss_service.CountMessageTokensResponse, + ]: + r"""Return a callable for the count message tokens method over gRPC. + + Runs a model's tokenizer on a string and returns the + token count. + + Returns: + Callable[[~.CountMessageTokensRequest], + ~.CountMessageTokensResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "count_message_tokens" not in self._stubs: + self._stubs["count_message_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.DiscussService/CountMessageTokens", + request_serializer=discuss_service.CountMessageTokensRequest.serialize, + response_deserializer=discuss_service.CountMessageTokensResponse.deserialize, + ) + return self._stubs["count_message_tokens"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("DiscussServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..26a81cb81dc4 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc_asyncio.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1beta.types import discuss_service + +from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport +from .grpc import DiscussServiceGrpcTransport + + +class DiscussServiceGrpcAsyncIOTransport(DiscussServiceTransport): + """gRPC AsyncIO backend transport for DiscussService. + + An API for using Generative Language Models (GLMs) in dialog + applications. + Also known as large language models (LLMs), this API provides + models that are trained for multi-turn dialog. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def generate_message( + self, + ) -> Callable[ + [discuss_service.GenerateMessageRequest], + Awaitable[discuss_service.GenerateMessageResponse], + ]: + r"""Return a callable for the generate message method over gRPC. + + Generates a response from the model given an input + ``MessagePrompt``. 
+ + Returns: + Callable[[~.GenerateMessageRequest], + Awaitable[~.GenerateMessageResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_message" not in self._stubs: + self._stubs["generate_message"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.DiscussService/GenerateMessage", + request_serializer=discuss_service.GenerateMessageRequest.serialize, + response_deserializer=discuss_service.GenerateMessageResponse.deserialize, + ) + return self._stubs["generate_message"] + + @property + def count_message_tokens( + self, + ) -> Callable[ + [discuss_service.CountMessageTokensRequest], + Awaitable[discuss_service.CountMessageTokensResponse], + ]: + r"""Return a callable for the count message tokens method over gRPC. + + Runs a model's tokenizer on a string and returns the + token count. + + Returns: + Callable[[~.CountMessageTokensRequest], + Awaitable[~.CountMessageTokensResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "count_message_tokens" not in self._stubs: + self._stubs["count_message_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.DiscussService/CountMessageTokens", + request_serializer=discuss_service.CountMessageTokensRequest.serialize, + response_deserializer=discuss_service.CountMessageTokensResponse.deserialize, + ) + return self._stubs["count_message_tokens"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("DiscussServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/rest.py new file mode 100644 index 000000000000..2dd9f09f2a16 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/discuss_service/transports/rest.py @@ -0,0 +1,474 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import discuss_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import DiscussServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class DiscussServiceRestInterceptor: + """Interceptor for DiscussService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the DiscussServiceRestTransport. + + .. code-block:: python + class MyCustomDiscussServiceInterceptor(DiscussServiceRestInterceptor): + def pre_count_message_tokens(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_count_message_tokens(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_message(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_message(self, response): + logging.log(f"Received response: {response}") + return response + + transport = DiscussServiceRestTransport(interceptor=MyCustomDiscussServiceInterceptor()) + client = DiscussServiceClient(transport=transport) + + + """ + + def pre_count_message_tokens( + self, + request: discuss_service.CountMessageTokensRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[discuss_service.CountMessageTokensRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for count_message_tokens + + Override in a subclass to manipulate the request or metadata + before they are sent to the DiscussService server. + """ + return request, metadata + + def post_count_message_tokens( + self, response: discuss_service.CountMessageTokensResponse + ) -> discuss_service.CountMessageTokensResponse: + """Post-rpc interceptor for count_message_tokens + + Override in a subclass to manipulate the response + after it is returned by the DiscussService server but before + it is returned to user code. 
+ """ + return response + + def pre_generate_message( + self, + request: discuss_service.GenerateMessageRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[discuss_service.GenerateMessageRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_message + + Override in a subclass to manipulate the request or metadata + before they are sent to the DiscussService server. + """ + return request, metadata + + def post_generate_message( + self, response: discuss_service.GenerateMessageResponse + ) -> discuss_service.GenerateMessageResponse: + """Post-rpc interceptor for generate_message + + Override in a subclass to manipulate the response + after it is returned by the DiscussService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class DiscussServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: DiscussServiceRestInterceptor + + +class DiscussServiceRestTransport(DiscussServiceTransport): + """REST backend transport for DiscussService. + + An API for using Generative Language Models (GLMs) in dialog + applications. + Also known as large language models (LLMs), this API provides + models that are trained for multi-turn dialog. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[DiscussServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or DiscussServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _CountMessageTokens(DiscussServiceRestStub):
+        def __hash__(self):
+            return hash("CountMessageTokens")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: discuss_service.CountMessageTokensRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> discuss_service.CountMessageTokensResponse:
+            r"""Call the count message tokens method over HTTP.
+
+            Args:
+                request (~.discuss_service.CountMessageTokensRequest):
+                    The request object. Counts the number of tokens in the ``prompt`` sent to a
+                model.
+
+                Models may tokenize text differently, so each model may
+                return a different ``token_count``.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.discuss_service.CountMessageTokensResponse:
+                    A response from ``CountMessageTokens``.
+
+                It returns the model's ``token_count`` for the
+                ``prompt``.
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:countMessageTokens", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_count_message_tokens( + request, metadata + ) + pb_request = discuss_service.CountMessageTokensRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = discuss_service.CountMessageTokensResponse() + pb_resp = discuss_service.CountMessageTokensResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_count_message_tokens(resp) + return resp + + class _GenerateMessage(DiscussServiceRestStub): + def __hash__(self): + return hash("GenerateMessage") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: discuss_service.GenerateMessageRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> discuss_service.GenerateMessageResponse: + r"""Call the generate message method over HTTP. + + Args: + request (~.discuss_service.GenerateMessageRequest): + The request object. Request to generate a message + response from the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.discuss_service.GenerateMessageResponse: + The response from the model. + + This includes candidate messages and + conversation history in the form of + chronologically-ordered messages. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:generateMessage", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_generate_message( + request, metadata + ) + pb_request = discuss_service.GenerateMessageRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = discuss_service.GenerateMessageResponse() + pb_resp = discuss_service.GenerateMessageResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_message(resp) + return resp + + @property + def count_message_tokens( + self, + ) -> Callable[ + [discuss_service.CountMessageTokensRequest], + discuss_service.CountMessageTokensResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CountMessageTokens(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_message( + self, + ) -> Callable[ + [discuss_service.GenerateMessageRequest], + discuss_service.GenerateMessageResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateMessage(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("DiscussServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/__init__.py new file mode 100644 index 000000000000..1e92ad575a7b --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import GenerativeServiceAsyncClient +from .client import GenerativeServiceClient + +__all__ = ( + "GenerativeServiceClient", + "GenerativeServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/async_client.py new file mode 100644 index 000000000000..d99cb5a81868 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/async_client.py @@ -0,0 +1,1070 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + AsyncIterable, + Awaitable, + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import generative_service, safety +from google.ai.generativelanguage_v1beta.types import content +from google.ai.generativelanguage_v1beta.types import content as gag_content + +from .client import GenerativeServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport +from .transports.grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport + + +class GenerativeServiceAsyncClient: + """API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. 
+ """ + + _client: GenerativeServiceClient + + DEFAULT_ENDPOINT = GenerativeServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = GenerativeServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(GenerativeServiceClient.model_path) + parse_model_path = staticmethod(GenerativeServiceClient.parse_model_path) + common_billing_account_path = staticmethod( + GenerativeServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + GenerativeServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(GenerativeServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + GenerativeServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + GenerativeServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + GenerativeServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(GenerativeServiceClient.common_project_path) + parse_common_project_path = staticmethod( + GenerativeServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(GenerativeServiceClient.common_location_path) + parse_common_location_path = staticmethod( + GenerativeServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenerativeServiceAsyncClient: The constructed client. + """ + return GenerativeServiceClient.from_service_account_info.__func__(GenerativeServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenerativeServiceAsyncClient: The constructed client. + """ + return GenerativeServiceClient.from_service_account_file.__func__(GenerativeServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. 
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return GenerativeServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> GenerativeServiceTransport: + """Returns the transport used by the client instance. + + Returns: + GenerativeServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(GenerativeServiceClient).get_transport_class, type(GenerativeServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GenerativeServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the generative service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.GenerativeServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = GenerativeServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateContentResponse: + r"""Generates a response from the model given an input + ``GenerateContentRequest``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = await client.generate_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GenerateContentRequest, dict]]): + The request object. Request to generate a completion from + the model. + model (:class:`str`): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1beta.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.GenerateContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
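+        # The defaults below retry only ``ServiceUnavailable`` errors, using
+        # exponential backoff (1.0s initial delay, multiplied by 1.3 per
+        # attempt and capped at 10s) for up to 60 seconds overall; an explicit
+        # ``retry`` or ``timeout`` argument passed to this method takes
+        # precedence over these defaults.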
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_content, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def generate_answer( + self, + request: Optional[Union[generative_service.GenerateAnswerRequest, dict]] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + safety_settings: Optional[MutableSequence[safety.SafetySetting]] = None, + answer_style: Optional[ + generative_service.GenerateAnswerRequest.AnswerStyle + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateAnswerResponse: + r"""Generates a grounded answer from the model given an input + ``GenerateAnswerRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_generate_answer(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateAnswerRequest( + model="model_value", + answer_style="VERBOSE", + ) + + # Make the request + response = await client.generate_answer(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest, dict]]): + The request object. Request to generate a grounded answer + from the model. + model (:class:`str`): + Required. The name of the ``Model`` to use for + generating the grounded response. + + Format: ``model=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1beta.types.Content]`): + Required. The content of the current conversation with + the model. For single-turn queries, this is a single + question to answer. For multi-turn queries, this is a + repeated field that contains conversation history and + the last ``Content`` in the list containing the + question. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + safety_settings (:class:`MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]`): + Optional. A list of unique ``SafetySetting`` instances + for blocking unsafe content. + + This will be enforced on the + ``GenerateAnswerRequest.contents`` and + ``GenerateAnswerResponse.candidate``. 
There should not + be more than one setting for each ``SafetyCategory`` + type. The API will block any contents and responses that + fail to meet the thresholds set by these settings. This + list overrides the default settings for each + ``SafetyCategory`` specified in the safety_settings. If + there is no ``SafetySetting`` for a given + ``SafetyCategory`` provided in the list, the API will + use the default safety setting for that category. + + This corresponds to the ``safety_settings`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + answer_style (:class:`google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest.AnswerStyle`): + Required. Style in which answers + should be returned. + + This corresponds to the ``answer_style`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateAnswerResponse: + Response from the model for a + grounded answer. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents, safety_settings, answer_style]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.GenerateAnswerRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if answer_style is not None: + request.answer_style = answer_style + if contents: + request.contents.extend(contents) + if safety_settings: + request.safety_settings.extend(safety_settings) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_answer, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def stream_generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[generative_service.GenerateContentResponse]]: + r"""Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GenerateContentRequest, dict]]): + The request object. Request to generate a completion from + the model. + model (:class:`str`): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1beta.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.ai.generativelanguage_v1beta.types.GenerateContentResponse]: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.GenerateContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
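+        # Note: this is a server-streaming method. The wrapped RPC below is
+        # returned without being awaited; awaiting the returned value (as in
+        # the sample above) yields an async iterable of
+        # ``GenerateContentResponse`` chunks.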
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stream_generate_content, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def embed_content( + self, + request: Optional[Union[generative_service.EmbedContentRequest, dict]] = None, + *, + model: Optional[str] = None, + content: Optional[gag_content.Content] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.EmbedContentResponse: + r"""Generates an embedding from the model given an input + ``Content``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_embed_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = await client.embed_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.EmbedContentRequest, dict]]): + The request object. Request containing the ``Content`` for the model to + embed. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + content (:class:`google.ai.generativelanguage_v1beta.types.Content`): + Required. The content to embed. Only the ``parts.text`` + fields will be counted. + + This corresponds to the ``content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.EmbedContentResponse: + The response to an EmbedContentRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
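+        # ``model`` and ``content`` are convenience ("flattened") parameters;
+        # when provided, they populate ``request.model`` and ``request.content``
+        # below instead of a caller-supplied ``request`` object.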
+ has_flattened_params = any([model, content]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.EmbedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if content is not None: + request.content = content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.embed_content, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_embed_contents( + self, + request: Optional[ + Union[generative_service.BatchEmbedContentsRequest, dict] + ] = None, + *, + model: Optional[str] = None, + requests: Optional[ + MutableSequence[generative_service.EmbedContentRequest] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.BatchEmbedContentsResponse: + r"""Generates multiple embeddings from the model given + input text in a synchronous call. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1beta.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = await client.batch_embed_contents(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.BatchEmbedContentsRequest, dict]]): + The request object. Batch request to get embeddings from + the model for a list of prompts. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedContentRequest]`): + Required. Embed requests for the batch. 
The model in + each of these requests must match the model specified + ``BatchEmbedContentsRequest.model``. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchEmbedContentsResponse: + The response to a BatchEmbedContentsRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.BatchEmbedContentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_embed_contents, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def count_tokens( + self, + request: Optional[Union[generative_service.CountTokensRequest, dict]] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.CountTokensResponse: + r"""Runs a model's tokenizer on input content and returns + the token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_count_tokens(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = await client.count_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CountTokensRequest, dict]]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.ai.generativelanguage_v1beta.types.Content]`): + Required. The input given to the + model as a prompt. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.CountTokensResponse: + A response from CountTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = generative_service.CountTokensRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.count_tokens, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "GenerativeServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("GenerativeServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/client.py new file mode 100644 index 000000000000..f72aa4a6eb21 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/client.py @@ -0,0 +1,1244 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Iterable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import generative_service, safety +from google.ai.generativelanguage_v1beta.types import content +from google.ai.generativelanguage_v1beta.types import content as gag_content + +from .transports.base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport +from .transports.grpc import GenerativeServiceGrpcTransport +from .transports.grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport +from .transports.rest import GenerativeServiceRestTransport + + +class GenerativeServiceClientMeta(type): + """Metaclass for the GenerativeService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[GenerativeServiceTransport]]
+    _transport_registry["grpc"] = GenerativeServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = GenerativeServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = GenerativeServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[GenerativeServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class GenerativeServiceClient(metaclass=GenerativeServiceClientMeta):
+    """API for using Large Models that generate multimodal content
+    and have additional capabilities beyond text generation.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            GenerativeServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            GenerativeServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> GenerativeServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            GenerativeServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def model_path(
+        model: str,
+    ) -> str:
+        """Returns a fully-qualified model string."""
+        return "models/{model}".format(
+            model=model,
+        )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str, str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` if provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, GenerativeServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the generative service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GenerativeServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GenerativeServiceTransport): + # transport is a GenerativeServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateContentResponse: + r"""Generates a response from the model given an input + ``GenerateContentRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = client.generate_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GenerateContentRequest, dict]): + The request object. Request to generate a completion from + the model. + model (str): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.GenerateContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.GenerateContentRequest): + request = generative_service.GenerateContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
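+        # The wrapped callables are built in _prep_wrapped_messages() on the
+        # transport (see transports/base.py later in this patch); by default
+        # they retry ServiceUnavailable with exponential backoff and a 60s
+        # deadline/timeout.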
+ rpc = self._transport._wrapped_methods[self._transport.generate_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def generate_answer( + self, + request: Optional[Union[generative_service.GenerateAnswerRequest, dict]] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + safety_settings: Optional[MutableSequence[safety.SafetySetting]] = None, + answer_style: Optional[ + generative_service.GenerateAnswerRequest.AnswerStyle + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateAnswerResponse: + r"""Generates a grounded answer from the model given an input + ``GenerateAnswerRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_generate_answer(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateAnswerRequest( + model="model_value", + answer_style="VERBOSE", + ) + + # Make the request + response = client.generate_answer(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest, dict]): + The request object. Request to generate a grounded answer + from the model. + model (str): + Required. The name of the ``Model`` to use for + generating the grounded response. + + Format: ``model=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The content of the current conversation with + the model. For single-turn queries, this is a single + question to answer. For multi-turn queries, this is a + repeated field that contains conversation history and + the last ``Content`` in the list containing the + question. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]): + Optional. A list of unique ``SafetySetting`` instances + for blocking unsafe content. + + This will be enforced on the + ``GenerateAnswerRequest.contents`` and + ``GenerateAnswerResponse.candidate``. There should not + be more than one setting for each ``SafetyCategory`` + type. The API will block any contents and responses that + fail to meet the thresholds set by these settings. This + list overrides the default settings for each + ``SafetyCategory`` specified in the safety_settings. 
If + there is no ``SafetySetting`` for a given + ``SafetyCategory`` provided in the list, the API will + use the default safety setting for that category. + + This corresponds to the ``safety_settings`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + answer_style (google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest.AnswerStyle): + Required. Style in which answers + should be returned. + + This corresponds to the ``answer_style`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateAnswerResponse: + Response from the model for a + grounded answer. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents, safety_settings, answer_style]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.GenerateAnswerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.GenerateAnswerRequest): + request = generative_service.GenerateAnswerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + if safety_settings is not None: + request.safety_settings = safety_settings + if answer_style is not None: + request.answer_style = answer_style + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_answer] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def stream_generate_content( + self, + request: Optional[ + Union[generative_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[generative_service.GenerateContentResponse]: + r"""Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GenerateContentRequest, dict]): + The request object. Request to generate a completion from + the model. + model (str): + Required. The name of the ``Model`` to use for + generating the completion. + + Format: ``name=models/{model}``. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.ai.generativelanguage_v1beta.types.GenerateContentResponse]: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They + are reported for both prompt in + GenerateContentResponse.prompt_feedback and for each + candidate in finish_reason and in safety_ratings. The + API contract is that: - either all requested + candidates are returned or no candidates at all - no + candidates are returned only if there was something + wrong with the prompt (see prompt_feedback) - + feedback on each candidate is reported on + finish_reason and safety_ratings. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.GenerateContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.GenerateContentRequest): + request = generative_service.GenerateContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
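+        # Because this is a server-streaming RPC, the wrapped callable returns
+        # an iterable that yields GenerateContentResponse chunks as the server
+        # produces them, rather than a single response message.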
+ rpc = self._transport._wrapped_methods[self._transport.stream_generate_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def embed_content( + self, + request: Optional[Union[generative_service.EmbedContentRequest, dict]] = None, + *, + model: Optional[str] = None, + content: Optional[gag_content.Content] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.EmbedContentResponse: + r"""Generates an embedding from the model given an input + ``Content``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_embed_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = client.embed_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.EmbedContentRequest, dict]): + The request object. Request containing the ``Content`` for the model to + embed. + model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + content (google.ai.generativelanguage_v1beta.types.Content): + Required. The content to embed. Only the ``parts.text`` + fields will be counted. + + This corresponds to the ``content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.EmbedContentResponse: + The response to an EmbedContentRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, content]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.EmbedContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
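+        # ``request`` may also be a plain dict; in that case the proto-plus
+        # constructor below coerces it into an EmbedContentRequest.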
+ if not isinstance(request, generative_service.EmbedContentRequest): + request = generative_service.EmbedContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if content is not None: + request.content = content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.embed_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_embed_contents( + self, + request: Optional[ + Union[generative_service.BatchEmbedContentsRequest, dict] + ] = None, + *, + model: Optional[str] = None, + requests: Optional[ + MutableSequence[generative_service.EmbedContentRequest] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.BatchEmbedContentsResponse: + r"""Generates multiple embeddings from the model given + input text in a synchronous call. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1beta.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = client.batch_embed_contents(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.BatchEmbedContentsRequest, dict]): + The request object. Batch request to get embeddings from + the model for a list of prompts. + model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedContentRequest]): + Required. Embed requests for the batch. The model in + each of these requests must match the model specified + ``BatchEmbedContentsRequest.model``. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.ai.generativelanguage_v1beta.types.BatchEmbedContentsResponse: + The response to a BatchEmbedContentsRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.BatchEmbedContentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.BatchEmbedContentsRequest): + request = generative_service.BatchEmbedContentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_embed_contents] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def count_tokens( + self, + request: Optional[Union[generative_service.CountTokensRequest, dict]] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.CountTokensResponse: + r"""Runs a model's tokenizer on input content and returns + the token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_count_tokens(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = client.count_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CountTokensRequest, dict]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. 
+ + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The input given to the + model as a prompt. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.CountTokensResponse: + A response from CountTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a generative_service.CountTokensRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, generative_service.CountTokensRequest): + request = generative_service.CountTokensRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.count_tokens] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "GenerativeServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("GenerativeServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/__init__.py new file mode 100644 index 000000000000..1d35da543a1e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GenerativeServiceTransport +from .grpc import GenerativeServiceGrpcTransport +from .grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport +from .rest import GenerativeServiceRestInterceptor, GenerativeServiceRestTransport + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[GenerativeServiceTransport]] +_transport_registry["grpc"] = GenerativeServiceGrpcTransport +_transport_registry["grpc_asyncio"] = GenerativeServiceGrpcAsyncIOTransport +_transport_registry["rest"] = GenerativeServiceRestTransport + +__all__ = ( + "GenerativeServiceTransport", + "GenerativeServiceGrpcTransport", + "GenerativeServiceGrpcAsyncIOTransport", + "GenerativeServiceRestTransport", + "GenerativeServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/base.py new file mode 100644 index 000000000000..7fffa3e4d2d4 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/base.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version +from google.ai.generativelanguage_v1beta.types import generative_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class GenerativeServiceTransport(abc.ABC): + """Abstract transport class for GenerativeService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
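+        # Self-signed JWT credentials sign a short-lived token locally for the
+        # API audience instead of exchanging an OAuth access token over the
+        # network, which saves a token-exchange round trip.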
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.generate_content: gapic_v1.method.wrap_method( + self.generate_content, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.generate_answer: gapic_v1.method.wrap_method( + self.generate_answer, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.stream_generate_content: gapic_v1.method.wrap_method( + self.stream_generate_content, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.embed_content: gapic_v1.method.wrap_method( + self.embed_content, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.batch_embed_contents: gapic_v1.method.wrap_method( + self.batch_embed_contents, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.count_tokens: gapic_v1.method.wrap_method( + self.count_tokens, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Union[ + generative_service.GenerateContentResponse, + Awaitable[generative_service.GenerateContentResponse], + ], + ]: + raise NotImplementedError() + + @property + def generate_answer( + self, + ) -> Callable[ + [generative_service.GenerateAnswerRequest], + Union[ + generative_service.GenerateAnswerResponse, + Awaitable[generative_service.GenerateAnswerResponse], + ], + ]: + raise NotImplementedError() + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Union[ + generative_service.GenerateContentResponse, + Awaitable[generative_service.GenerateContentResponse], + ], + ]: + raise NotImplementedError() + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + Union[ + generative_service.EmbedContentResponse, + Awaitable[generative_service.EmbedContentResponse], + ], + ]: + raise NotImplementedError() + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + Union[ + generative_service.BatchEmbedContentsResponse, + Awaitable[generative_service.BatchEmbedContentsResponse], + ], + ]: + raise NotImplementedError() + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], + Union[ + generative_service.CountTokensResponse, + Awaitable[generative_service.CountTokensResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("GenerativeServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc.py new file mode 100644 index 000000000000..b1d1aaa0fb7e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1beta.types import generative_service + +from .base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport + + +class GenerativeServiceGrpcTransport(GenerativeServiceTransport): + """gRPC backend transport for GenerativeService. 
+ + API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + r"""Return a callable for the generate content method over gRPC. + + Generates a response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + ~.GenerateContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_content" not in self._stubs: + self._stubs["generate_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/GenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["generate_content"] + + @property + def generate_answer( + self, + ) -> Callable[ + [generative_service.GenerateAnswerRequest], + generative_service.GenerateAnswerResponse, + ]: + r"""Return a callable for the generate answer method over gRPC. + + Generates a grounded answer from the model given an input + ``GenerateAnswerRequest``. + + Returns: + Callable[[~.GenerateAnswerRequest], + ~.GenerateAnswerResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_answer" not in self._stubs: + self._stubs["generate_answer"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/GenerateAnswer", + request_serializer=generative_service.GenerateAnswerRequest.serialize, + response_deserializer=generative_service.GenerateAnswerResponse.deserialize, + ) + return self._stubs["generate_answer"] + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + ~.GenerateContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
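+        # Unlike the unary stubs above, this stub is created with unary_stream:
+        # a single request goes in and a stream of GenerateContentResponse
+        # messages comes back.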
+ if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.ai.generativelanguage.v1beta.GenerativeService/StreamGenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + generative_service.EmbedContentResponse, + ]: + r"""Return a callable for the embed content method over gRPC. + + Generates an embedding from the model given an input + ``Content``. + + Returns: + Callable[[~.EmbedContentRequest], + ~.EmbedContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "embed_content" not in self._stubs: + self._stubs["embed_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/EmbedContent", + request_serializer=generative_service.EmbedContentRequest.serialize, + response_deserializer=generative_service.EmbedContentResponse.deserialize, + ) + return self._stubs["embed_content"] + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + generative_service.BatchEmbedContentsResponse, + ]: + r"""Return a callable for the batch embed contents method over gRPC. + + Generates multiple embeddings from the model given + input text in a synchronous call. + + Returns: + Callable[[~.BatchEmbedContentsRequest], + ~.BatchEmbedContentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_embed_contents" not in self._stubs: + self._stubs["batch_embed_contents"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/BatchEmbedContents", + request_serializer=generative_service.BatchEmbedContentsRequest.serialize, + response_deserializer=generative_service.BatchEmbedContentsResponse.deserialize, + ) + return self._stubs["batch_embed_contents"] + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], generative_service.CountTokensResponse + ]: + r"""Return a callable for the count tokens method over gRPC. + + Runs a model's tokenizer on input content and returns + the token count. + + Returns: + Callable[[~.CountTokensRequest], + ~.CountTokensResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
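+        # Stubs are cached in self._stubs, so repeated property access reuses
+        # the same channel-bound callable instead of creating a new one.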
+ if "count_tokens" not in self._stubs: + self._stubs["count_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/CountTokens", + request_serializer=generative_service.CountTokensRequest.serialize, + response_deserializer=generative_service.CountTokensResponse.deserialize, + ) + return self._stubs["count_tokens"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("GenerativeServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..62d461f879db --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/grpc_asyncio.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1beta.types import generative_service + +from .base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport +from .grpc import GenerativeServiceGrpcTransport + + +class GenerativeServiceGrpcAsyncIOTransport(GenerativeServiceTransport): + """gRPC AsyncIO backend transport for GenerativeService. + + API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Awaitable[generative_service.GenerateContentResponse], + ]: + r"""Return a callable for the generate content method over gRPC. + + Generates a response from the model given an input + ``GenerateContentRequest``. 
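+
+ A minimal, hypothetical invocation of the returned callable, assuming
+ the transport already holds working credentials and ``request`` is a
+ populated ``GenerateContentRequest``:
+
+ .. code-block:: python
+
+ response = await transport.generate_content(request)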
+ + Returns: + Callable[[~.GenerateContentRequest], + Awaitable[~.GenerateContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_content" not in self._stubs: + self._stubs["generate_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/GenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["generate_content"] + + @property + def generate_answer( + self, + ) -> Callable[ + [generative_service.GenerateAnswerRequest], + Awaitable[generative_service.GenerateAnswerResponse], + ]: + r"""Return a callable for the generate answer method over gRPC. + + Generates a grounded answer from the model given an input + ``GenerateAnswerRequest``. + + Returns: + Callable[[~.GenerateAnswerRequest], + Awaitable[~.GenerateAnswerResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_answer" not in self._stubs: + self._stubs["generate_answer"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/GenerateAnswer", + request_serializer=generative_service.GenerateAnswerRequest.serialize, + response_deserializer=generative_service.GenerateAnswerResponse.deserialize, + ) + return self._stubs["generate_answer"] + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + Awaitable[generative_service.GenerateContentResponse], + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generates a streamed response from the model given an input + ``GenerateContentRequest``. + + Returns: + Callable[[~.GenerateContentRequest], + Awaitable[~.GenerateContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.ai.generativelanguage.v1beta.GenerativeService/StreamGenerateContent", + request_serializer=generative_service.GenerateContentRequest.serialize, + response_deserializer=generative_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + Awaitable[generative_service.EmbedContentResponse], + ]: + r"""Return a callable for the embed content method over gRPC. + + Generates an embedding from the model given an input + ``Content``. + + Returns: + Callable[[~.EmbedContentRequest], + Awaitable[~.EmbedContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "embed_content" not in self._stubs: + self._stubs["embed_content"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/EmbedContent", + request_serializer=generative_service.EmbedContentRequest.serialize, + response_deserializer=generative_service.EmbedContentResponse.deserialize, + ) + return self._stubs["embed_content"] + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + Awaitable[generative_service.BatchEmbedContentsResponse], + ]: + r"""Return a callable for the batch embed contents method over gRPC. + + Generates multiple embeddings from the model given + input text in a synchronous call. + + Returns: + Callable[[~.BatchEmbedContentsRequest], + Awaitable[~.BatchEmbedContentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_embed_contents" not in self._stubs: + self._stubs["batch_embed_contents"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/BatchEmbedContents", + request_serializer=generative_service.BatchEmbedContentsRequest.serialize, + response_deserializer=generative_service.BatchEmbedContentsResponse.deserialize, + ) + return self._stubs["batch_embed_contents"] + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], + Awaitable[generative_service.CountTokensResponse], + ]: + r"""Return a callable for the count tokens method over gRPC. + + Runs a model's tokenizer on input content and returns + the token count. + + Returns: + Callable[[~.CountTokensRequest], + Awaitable[~.CountTokensResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "count_tokens" not in self._stubs: + self._stubs["count_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.GenerativeService/CountTokens", + request_serializer=generative_service.CountTokensRequest.serialize, + response_deserializer=generative_service.CountTokensResponse.deserialize, + ) + return self._stubs["count_tokens"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("GenerativeServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/rest.py new file mode 100644 index 000000000000..5e91c374f274 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/generative_service/transports/rest.py @@ -0,0 +1,1051 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import generative_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import GenerativeServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class GenerativeServiceRestInterceptor: + """Interceptor for GenerativeService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the GenerativeServiceRestTransport. + + .. 
code-block:: python + class MyCustomGenerativeServiceInterceptor(GenerativeServiceRestInterceptor): + def pre_batch_embed_contents(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_embed_contents(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_count_tokens(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_count_tokens(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_embed_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_embed_content(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_answer(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_answer(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_content(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_stream_generate_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_stream_generate_content(self, response): + logging.log(f"Received response: {response}") + return response + + transport = GenerativeServiceRestTransport(interceptor=MyCustomGenerativeServiceInterceptor()) + client = GenerativeServiceClient(transport=transport) + + + """ + + def pre_batch_embed_contents( + self, + request: generative_service.BatchEmbedContentsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.BatchEmbedContentsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_embed_contents + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_batch_embed_contents( + self, response: generative_service.BatchEmbedContentsResponse + ) -> generative_service.BatchEmbedContentsResponse: + """Post-rpc interceptor for batch_embed_contents + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_count_tokens( + self, + request: generative_service.CountTokensRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.CountTokensRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for count_tokens + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_count_tokens( + self, response: generative_service.CountTokensResponse + ) -> generative_service.CountTokensResponse: + """Post-rpc interceptor for count_tokens + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. 
+ """ + return response + + def pre_embed_content( + self, + request: generative_service.EmbedContentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.EmbedContentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for embed_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_embed_content( + self, response: generative_service.EmbedContentResponse + ) -> generative_service.EmbedContentResponse: + """Post-rpc interceptor for embed_content + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_generate_answer( + self, + request: generative_service.GenerateAnswerRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.GenerateAnswerRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_answer + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_generate_answer( + self, response: generative_service.GenerateAnswerResponse + ) -> generative_service.GenerateAnswerResponse: + """Post-rpc interceptor for generate_answer + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_generate_content( + self, + request: generative_service.GenerateContentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.GenerateContentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_generate_content( + self, response: generative_service.GenerateContentResponse + ) -> generative_service.GenerateContentResponse: + """Post-rpc interceptor for generate_content + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + def pre_stream_generate_content( + self, + request: generative_service.GenerateContentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[generative_service.GenerateContentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for stream_generate_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenerativeService server. + """ + return request, metadata + + def post_stream_generate_content( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for stream_generate_content + + Override in a subclass to manipulate the response + after it is returned by the GenerativeService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class GenerativeServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: GenerativeServiceRestInterceptor + + +class GenerativeServiceRestTransport(GenerativeServiceTransport): + """REST backend transport for GenerativeService. + + API for using Large Models that generate multimodal content + and have additional capabilities beyond text generation. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[GenerativeServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
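+ # The next block normalizes ``host``: if the caller passed a bare hostname
+ # with no scheme, the configured ``url_scheme`` (default "https") is
+ # prepended before the base transport and the AuthorizedSession are set up.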
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+ # credentials object
+ maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+ if maybe_url_match is None:
+ raise ValueError(
+ f"Unexpected hostname structure: {host}"
+ ) # pragma: NO COVER
+
+ url_match_items = maybe_url_match.groupdict()
+
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
+ )
+ self._session = AuthorizedSession(
+ self._credentials, default_host=self.DEFAULT_HOST
+ )
+ if client_cert_source_for_mtls:
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
+ self._interceptor = interceptor or GenerativeServiceRestInterceptor()
+ self._prep_wrapped_messages(client_info)
+
+ class _BatchEmbedContents(GenerativeServiceRestStub):
+ def __hash__(self):
+ return hash("BatchEmbedContents")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ def __call__(
+ self,
+ request: generative_service.BatchEmbedContentsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> generative_service.BatchEmbedContentsResponse:
+ r"""Call the batch embed contents method over HTTP.
+
+ Args:
+ request (~.generative_service.BatchEmbedContentsRequest):
+ The request object. Batch request to get embeddings from
+ the model for a list of prompts.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.generative_service.BatchEmbedContentsResponse:
+ The response to a ``BatchEmbedContentsRequest``.
+ """
+
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1beta/{model=models/*}:batchEmbedContents",
+ "body": "*",
+ },
+ ]
+ request, metadata = self._interceptor.pre_batch_embed_contents(
+ request, metadata
+ )
+ pb_request = generative_service.BatchEmbedContentsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"],
+ including_default_value_fields=False,
+ use_integers_for_enums=True,
+ )
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+
+ # Jsonify the query params
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ including_default_value_fields=False,
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(self._get_unset_required_fields(query_params))
+
+ query_params["$alt"] = "json;enum-encoding=int"
+
+ # Send the request
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(self._session, method)(
+ "{host}{uri}".format(host=self._host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
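+ # For example, an HTTP 404 from the service would be re-raised to the
+ # caller as google.api_core.exceptions.NotFound (a hypothetical failure,
+ # shown only to illustrate the mapping done by from_http_response below).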
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.BatchEmbedContentsResponse() + pb_resp = generative_service.BatchEmbedContentsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_embed_contents(resp) + return resp + + class _CountTokens(GenerativeServiceRestStub): + def __hash__(self): + return hash("CountTokens") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.CountTokensRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.CountTokensResponse: + r"""Call the count tokens method over HTTP. + + Args: + request (~.generative_service.CountTokensRequest): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.CountTokensResponse: + A response from ``CountTokens``. + + It returns the model's ``token_count`` for the + ``prompt``. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:countTokens", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_count_tokens(request, metadata) + pb_request = generative_service.CountTokensRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.CountTokensResponse() + pb_resp = generative_service.CountTokensResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_count_tokens(resp) + return resp + + class _EmbedContent(GenerativeServiceRestStub): + def __hash__(self): + return hash("EmbedContent") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.EmbedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.EmbedContentResponse: + r"""Call the embed content method over HTTP. + + Args: + request (~.generative_service.EmbedContentRequest): + The request object. Request containing the ``Content`` for the model to + embed. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.EmbedContentResponse: + The response to an ``EmbedContentRequest``. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:embedContent", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_embed_content(request, metadata) + pb_request = generative_service.EmbedContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.EmbedContentResponse() + pb_resp = generative_service.EmbedContentResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_embed_content(resp) + return resp + + class _GenerateAnswer(GenerativeServiceRestStub): + def __hash__(self): + return hash("GenerateAnswer") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.GenerateAnswerRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateAnswerResponse: + r"""Call the generate answer method over HTTP. + + Args: + request (~.generative_service.GenerateAnswerRequest): + The request object. Request to generate a grounded answer + from the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.GenerateAnswerResponse: + Response from the model for a + grounded answer. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:generateAnswer", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_generate_answer(request, metadata) + pb_request = generative_service.GenerateAnswerRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.GenerateAnswerResponse() + pb_resp = generative_service.GenerateAnswerResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_answer(resp) + return resp + + class _GenerateContent(GenerativeServiceRestStub): + def __hash__(self): + return hash("GenerateContent") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.GenerateContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> generative_service.GenerateContentResponse: + r"""Call the generate content method over HTTP. + + Args: + request (~.generative_service.GenerateContentRequest): + The request object. Request to generate a completion from + the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They are + reported for both prompt in + ``GenerateContentResponse.prompt_feedback`` and for each + candidate in ``finish_reason`` and in + ``safety_ratings``. The API contract is that: + + - either all requested candidates are returned or no + candidates at all + - no candidates are returned only if there was + something wrong with the prompt (see + ``prompt_feedback``) + - feedback on each candidate is reported on + ``finish_reason`` and ``safety_ratings``. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:generateContent", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_generate_content( + request, metadata + ) + pb_request = generative_service.GenerateContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = generative_service.GenerateContentResponse() + pb_resp = generative_service.GenerateContentResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_content(resp) + return resp + + class _StreamGenerateContent(GenerativeServiceRestStub): + def __hash__(self): + return hash("StreamGenerateContent") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: generative_service.GenerateContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the stream generate content method over HTTP. + + Args: + request (~.generative_service.GenerateContentRequest): + The request object. Request to generate a completion from + the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.generative_service.GenerateContentResponse: + Response from the model supporting multiple candidates. + + Note on safety ratings and content filtering. They are + reported for both prompt in + ``GenerateContentResponse.prompt_feedback`` and for each + candidate in ``finish_reason`` and in + ``safety_ratings``. The API contract is that: + + - either all requested candidates are returned or no + candidates at all + - no candidates are returned only if there was + something wrong with the prompt (see + ``prompt_feedback``) + - feedback on each candidate is reported on + ``finish_reason`` and ``safety_ratings``. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:streamGenerateContent", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_stream_generate_content( + request, metadata + ) + pb_request = generative_service.GenerateContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
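+ # On success (below), the body is not parsed into a single message; it is
+ # wrapped in a rest_streaming.ResponseIterator so callers can consume
+ # GenerateContentResponse chunks as they arrive.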
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, generative_service.GenerateContentResponse + ) + resp = self._interceptor.post_stream_generate_content(resp) + return resp + + @property + def batch_embed_contents( + self, + ) -> Callable[ + [generative_service.BatchEmbedContentsRequest], + generative_service.BatchEmbedContentsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchEmbedContents(self._session, self._host, self._interceptor) # type: ignore + + @property + def count_tokens( + self, + ) -> Callable[ + [generative_service.CountTokensRequest], generative_service.CountTokensResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CountTokens(self._session, self._host, self._interceptor) # type: ignore + + @property + def embed_content( + self, + ) -> Callable[ + [generative_service.EmbedContentRequest], + generative_service.EmbedContentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._EmbedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_answer( + self, + ) -> Callable[ + [generative_service.GenerateAnswerRequest], + generative_service.GenerateAnswerResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateAnswer(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def stream_generate_content( + self, + ) -> Callable[ + [generative_service.GenerateContentRequest], + generative_service.GenerateContentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._StreamGenerateContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("GenerativeServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/__init__.py new file mode 100644 index 000000000000..5738b8bf4239 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import ModelServiceAsyncClient +from .client import ModelServiceClient + +__all__ = ( + "ModelServiceClient", + "ModelServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/async_client.py new file mode 100644 index 000000000000..d82ec5481b16 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/async_client.py @@ -0,0 +1,1101 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.services.model_service import pagers +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + +from .client import ModelServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +class ModelServiceAsyncClient: + """Provides methods for getting metadata information about + Generative Models. 
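+
+ A minimal, hypothetical way to construct the client, assuming
+ application-default credentials are available in the environment:
+
+ .. code-block:: python
+
+ from google.ai import generativelanguage_v1beta
+
+ client = generativelanguage_v1beta.ModelServiceAsyncClient()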
+ """ + + _client: ModelServiceClient + + DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(ModelServiceClient.model_path) + parse_model_path = staticmethod(ModelServiceClient.parse_model_path) + tuned_model_path = staticmethod(ModelServiceClient.tuned_model_path) + parse_tuned_model_path = staticmethod(ModelServiceClient.parse_tuned_model_path) + common_billing_account_path = staticmethod( + ModelServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ModelServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(ModelServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(ModelServiceClient.common_organization_path) + parse_common_organization_path = staticmethod( + ModelServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(ModelServiceClient.common_project_path) + parse_common_project_path = staticmethod( + ModelServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(ModelServiceClient.common_location_path) + parse_common_location_path = staticmethod( + ModelServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. 
+ + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(ModelServiceClient).get_transport_class, type(ModelServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ModelServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ModelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def get_model( + self, + request: Optional[Union[model_service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets information about a specific Model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_get_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GetModelRequest, dict]]): + The request object. Request for getting information about + a specific Model. + name (:class:`str`): + Required. The resource name of the model. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Model: + Information about a Generative + Language Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models( + self, + request: Optional[Union[model_service.ListModelsRequest, dict]] = None, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists models available through the API. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_list_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.ListModelsRequest, dict]]): + The request object. Request for listing all Models. + page_size (:class:`int`): + The maximum number of ``Models`` to return (per page). + + The service may return fewer models. If unspecified, at + most 50 models will be returned per page. This method + returns at most 1000 models per page, even if you pass a + larger page_size. + + This corresponds to the ``page_size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + page_token (:class:`str`): + A page token, received from a previous ``ListModels`` + call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListModels`` must match the call that provided the + page token. + + This corresponds to the ``page_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.model_service.pagers.ListModelsAsyncPager: + Response from ListModel containing a paginated list of + Models. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([page_size, page_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if page_size is not None: + request.page_size = page_size + if page_token is not None: + request.page_token = page_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. 
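+        # Editorial note (illustrative sketch, not generated code): the pager
+        # produced below is usually consumed with ``async for``, e.g.
+        #
+        #     async for model in await client.list_models(page_size=50):
+        #         print(model.name)
+        #
+        # additional pages are fetched lazily as the iteration proceeds.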
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tuned_model( + self, + request: Optional[Union[model_service.GetTunedModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tuned_model.TunedModel: + r"""Gets information about a specific TunedModel. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_get_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetTunedModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tuned_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GetTunedModelRequest, dict]]): + The request object. Request for getting information about + a specific Model. + name (:class:`str`): + Required. The resource name of the model. + + Format: ``tunedModels/my-model-id`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.TunedModel: + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.GetTunedModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tuned_model, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tuned_models( + self, + request: Optional[Union[model_service.ListTunedModelsRequest, dict]] = None, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTunedModelsAsyncPager: + r"""Lists tuned models owned by the user. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_list_tuned_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListTunedModelsRequest( + ) + + # Make the request + page_result = client.list_tuned_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.ListTunedModelsRequest, dict]]): + The request object. Request for listing TunedModels. + page_size (:class:`int`): + Optional. The maximum number of ``TunedModels`` to + return (per page). The service may return fewer tuned + models. + + If unspecified, at most 10 tuned models will be + returned. This method returns at most 1000 models per + page, even if you pass a larger page_size. + + This corresponds to the ``page_size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + page_token (:class:`str`): + Optional. A page token, received from a previous + ``ListTunedModels`` call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListTunedModels`` must match the call that provided + the page token. + + This corresponds to the ``page_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.model_service.pagers.ListTunedModelsAsyncPager: + Response from ListTunedModels containing a paginated + list of Models. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
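+        # Editorial note (illustrative sketch, not generated code): the check
+        # below enforces the GAPIC convention that callers pass either a
+        # request object or flattened fields, never both, e.g.
+        #
+        #     await client.list_tuned_models(page_size=10)
+        #     # or
+        #     await client.list_tuned_models(
+        #         request=model_service.ListTunedModelsRequest(page_size=10)
+        #     )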
+ has_flattened_params = any([page_size, page_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.ListTunedModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if page_size is not None: + request.page_size = page_size + if page_token is not None: + request.page_token = page_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tuned_models, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTunedModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_tuned_model( + self, + request: Optional[Union[model_service.CreateTunedModelRequest, dict]] = None, + *, + tuned_model: Optional[gag_tuned_model.TunedModel] = None, + tuned_model_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a tuned model. Intermediate tuning progress (if any) is + accessed through the [google.longrunning.Operations] service. + + Status and results can be accessed through the Operations + service. Example: GET + /v1/tunedModels/az2mb0bpw6i/operations/000-111-222 + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_create_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.CreateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + operation = client.create_tuned_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CreateTunedModelRequest, dict]]): + The request object. Request to create a TunedModel. + tuned_model (:class:`google.ai.generativelanguage_v1beta.types.TunedModel`): + Required. The tuned model to create. 
+ This corresponds to the ``tuned_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tuned_model_id (:class:`str`): + Optional. The unique id for the tuned model if + specified. This value should be up to 40 characters, the + first character must be a letter, the last could be a + letter or a number. The id must match the regular + expression: `a-z <[a-z0-9-]{0,38}[a-z0-9]>`__?. + + This corresponds to the ``tuned_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.ai.generativelanguage_v1beta.types.TunedModel` + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tuned_model, tuned_model_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.CreateTunedModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tuned_model is not None: + request.tuned_model = tuned_model + if tuned_model_id is not None: + request.tuned_model_id = tuned_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tuned_model, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gag_tuned_model.TunedModel, + metadata_type=model_service.CreateTunedModelMetadata, + ) + + # Done; return the response. + return response + + async def update_tuned_model( + self, + request: Optional[Union[model_service.UpdateTunedModelRequest, dict]] = None, + *, + tuned_model: Optional[gag_tuned_model.TunedModel] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_tuned_model.TunedModel: + r"""Updates a tuned model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_update_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.UpdateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + response = await client.update_tuned_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.UpdateTunedModelRequest, dict]]): + The request object. Request to update a TunedModel. + tuned_model (:class:`google.ai.generativelanguage_v1beta.types.TunedModel`): + Required. The tuned model to update. + This corresponds to the ``tuned_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.TunedModel: + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tuned_model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.UpdateTunedModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tuned_model is not None: + request.tuned_model = tuned_model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tuned_model, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tuned_model.name", request.tuned_model.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
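+        # Editorial sketch (illustrative only; ``display_name`` is simply an
+        # example of an updatable field): a typical flattened call pairs the
+        # modified TunedModel with a FieldMask naming the changed fields, e.g.
+        #
+        #     from google.protobuf import field_mask_pb2
+        #
+        #     tuned.display_name = "My tuned model"
+        #     await client.update_tuned_model(
+        #         tuned_model=tuned,
+        #         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+        #     )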
+ return response + + async def delete_tuned_model( + self, + request: Optional[Union[model_service.DeleteTunedModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a tuned model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_delete_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteTunedModelRequest( + name="name_value", + ) + + # Make the request + await client.delete_tuned_model(request=request) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.DeleteTunedModelRequest, dict]]): + The request object. Request to delete a TunedModel. + name (:class:`str`): + Required. The resource name of the model. Format: + ``tunedModels/my-model-id`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.DeleteTunedModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tuned_model, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
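+        # Editorial note (illustrative only): deletion returns ``None`` on
+        # success; a typical call looks like
+        #
+        #     async with ModelServiceAsyncClient() as client:
+        #         await client.delete_tuned_model(name="tunedModels/my-model-id")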
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def __aenter__(self) -> "ModelServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ModelServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/client.py new file mode 100644 index 000000000000..f98851c12c22 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/client.py @@ -0,0 +1,1282 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.services.model_service import pagers +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + +from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport +from .transports.grpc import ModelServiceGrpcTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .transports.rest import ModelServiceRestTransport + + +class ModelServiceClientMeta(type): + """Metaclass for the ModelService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
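+
+    For example (an illustrative sketch only; the "rest" label assumes the REST
+    transport generated for this service is preferred over the default gRPC
+    transport):
+
+    .. code-block:: python
+
+        transport_cls = ModelServiceClient.get_transport_class("rest")
+        client = ModelServiceClient(transport="rest")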
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[ModelServiceTransport]]
+    _transport_registry["grpc"] = ModelServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = ModelServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[ModelServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ModelServiceClient(metaclass=ModelServiceClientMeta):
+    """Provides methods for getting metadata information about
+    Generative Models.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ModelServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            ModelServiceTransport: The transport used by the client
+                instance.
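+
+        For example (illustrative only; the concrete transport depends on how
+        the client was constructed):
+
+        .. code-block:: python
+
+            client = ModelServiceClient()
+            print(type(client.transport).__name__)  # e.g. ModelServiceGrpcTransport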
+        """
+        return self._transport
+
+    @staticmethod
+    def model_path(
+        model: str,
+    ) -> str:
+        """Returns a fully-qualified model string."""
+        return "models/{model}".format(
+            model=model,
+        )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str, str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def tuned_model_path(
+        tuned_model: str,
+    ) -> str:
+        """Returns a fully-qualified tuned_model string."""
+        return "tunedModels/{tuned_model}".format(
+            tuned_model=tuned_model,
+        )
+
+    @staticmethod
+    def parse_tuned_model_path(path: str) -> Dict[str, str]:
+        """Parses a tuned_model path into its component segments."""
+        m = re.match(r"^tunedModels/(?P<tuned_model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ModelServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ModelServiceTransport): + # transport is a ModelServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def get_model( + self, + request: Optional[Union[model_service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets information about a specific Model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_get_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GetModelRequest, dict]): + The request object. Request for getting information about + a specific Model. + name (str): + Required. The resource name of the model. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Model: + Information about a Generative + Language Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelRequest): + request = model_service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models( + self, + request: Optional[Union[model_service.ListModelsRequest, dict]] = None, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists models available through the API. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_list_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.ListModelsRequest, dict]): + The request object. Request for listing all Models. + page_size (int): + The maximum number of ``Models`` to return (per page). + + The service may return fewer models. If unspecified, at + most 50 models will be returned per page. This method + returns at most 1000 models per page, even if you pass a + larger page_size. + + This corresponds to the ``page_size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + page_token (str): + A page token, received from a previous ``ListModels`` + call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListModels`` must match the call that provided the + page token. + + This corresponds to the ``page_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.model_service.pagers.ListModelsPager: + Response from ListModel containing a paginated list of + Models. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([page_size, page_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelsRequest): + request = model_service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if page_size is not None: + request.page_size = page_size + if page_token is not None: + request.page_token = page_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Send the request. 
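+        # Editorial note (illustrative sketch, not generated code): besides
+        # plain iteration, the pager built below also exposes ``pages`` for
+        # page-by-page access, e.g.
+        #
+        #     for page in client.list_models(page_size=50).pages:
+        #         print(len(page.models), page.next_page_token)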
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tuned_model( + self, + request: Optional[Union[model_service.GetTunedModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tuned_model.TunedModel: + r"""Gets information about a specific TunedModel. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_get_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetTunedModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_tuned_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GetTunedModelRequest, dict]): + The request object. Request for getting information about + a specific Model. + name (str): + Required. The resource name of the model. + + Format: ``tunedModels/my-model-id`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.TunedModel: + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetTunedModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetTunedModelRequest): + request = model_service.GetTunedModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tuned_model] + + # Certain fields should be provided within the metadata header; + # add these here. 
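+        # Editorial note: the metadata appended below becomes the
+        # ``x-goog-request-params`` routing header (for example
+        # ``name=tunedModels/my-model-id``) so the backend can route the call.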
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tuned_models( + self, + request: Optional[Union[model_service.ListTunedModelsRequest, dict]] = None, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTunedModelsPager: + r"""Lists tuned models owned by the user. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_list_tuned_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListTunedModelsRequest( + ) + + # Make the request + page_result = client.list_tuned_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.ListTunedModelsRequest, dict]): + The request object. Request for listing TunedModels. + page_size (int): + Optional. The maximum number of ``TunedModels`` to + return (per page). The service may return fewer tuned + models. + + If unspecified, at most 10 tuned models will be + returned. This method returns at most 1000 models per + page, even if you pass a larger page_size. + + This corresponds to the ``page_size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + page_token (str): + Optional. A page token, received from a previous + ``ListTunedModels`` call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListTunedModels`` must match the call that provided + the page token. + + This corresponds to the ``page_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.model_service.pagers.ListTunedModelsPager: + Response from ListTunedModels containing a paginated + list of Models. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([page_size, page_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListTunedModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListTunedModelsRequest): + request = model_service.ListTunedModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if page_size is not None: + request.page_size = page_size + if page_token is not None: + request.page_token = page_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tuned_models] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTunedModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_tuned_model( + self, + request: Optional[Union[model_service.CreateTunedModelRequest, dict]] = None, + *, + tuned_model: Optional[gag_tuned_model.TunedModel] = None, + tuned_model_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a tuned model. Intermediate tuning progress (if any) is + accessed through the [google.longrunning.Operations] service. + + Status and results can be accessed through the Operations + service. Example: GET + /v1/tunedModels/az2mb0bpw6i/operations/000-111-222 + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_create_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.CreateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + operation = client.create_tuned_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CreateTunedModelRequest, dict]): + The request object. Request to create a TunedModel. + tuned_model (google.ai.generativelanguage_v1beta.types.TunedModel): + Required. The tuned model to create. + This corresponds to the ``tuned_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tuned_model_id (str): + Optional. The unique id for the tuned model if + specified. 
This value should be up to 40 characters, the + first character must be a letter, the last could be a + letter or a number. The id must match the regular + expression: `a-z <[a-z0-9-]{0,38}[a-z0-9]>`__?. + + This corresponds to the ``tuned_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.ai.generativelanguage_v1beta.types.TunedModel` + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tuned_model, tuned_model_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.CreateTunedModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.CreateTunedModelRequest): + request = model_service.CreateTunedModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tuned_model is not None: + request.tuned_model = tuned_model + if tuned_model_id is not None: + request.tuned_model_id = tuned_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tuned_model] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gag_tuned_model.TunedModel, + metadata_type=model_service.CreateTunedModelMetadata, + ) + + # Done; return the response. + return response + + def update_tuned_model( + self, + request: Optional[Union[model_service.UpdateTunedModelRequest, dict]] = None, + *, + tuned_model: Optional[gag_tuned_model.TunedModel] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_tuned_model.TunedModel: + r"""Updates a tuned model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_update_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.UpdateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + response = client.update_tuned_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.UpdateTunedModelRequest, dict]): + The request object. Request to update a TunedModel. + tuned_model (google.ai.generativelanguage_v1beta.types.TunedModel): + Required. The tuned model to update. + This corresponds to the ``tuned_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.TunedModel: + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tuned_model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UpdateTunedModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UpdateTunedModelRequest): + request = model_service.UpdateTunedModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tuned_model is not None: + request.tuned_model = tuned_model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tuned_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tuned_model.name", request.tuned_model.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_tuned_model( + self, + request: Optional[Union[model_service.DeleteTunedModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a tuned model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_delete_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteTunedModelRequest( + name="name_value", + ) + + # Make the request + client.delete_tuned_model(request=request) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.DeleteTunedModelRequest, dict]): + The request object. Request to delete a TunedModel. + name (str): + Required. The resource name of the model. Format: + ``tunedModels/my-model-id`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteTunedModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteTunedModelRequest): + request = model_service.DeleteTunedModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tuned_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self) -> "ModelServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ModelServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/pagers.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/pagers.py new file mode 100644 index 000000000000..46fe5efdaf33 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/pagers.py @@ -0,0 +1,283 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, +) + +from google.ai.generativelanguage_v1beta.types import model, model_service, tuned_model + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_service.ListModelsResponse], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListModelsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[model_service.ListModelsResponse]], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListModelsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTunedModelsPager: + """A pager for iterating through ``list_tuned_models`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListTunedModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tuned_models`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTunedModels`` requests and continue to iterate + through the ``tuned_models`` field on the + corresponding responses. 
+ + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListTunedModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_service.ListTunedModelsResponse], + request: model_service.ListTunedModelsRequest, + response: model_service.ListTunedModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListTunedModelsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListTunedModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListTunedModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListTunedModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tuned_model.TunedModel]: + for page in self.pages: + yield from page.tuned_models + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTunedModelsAsyncPager: + """A pager for iterating through ``list_tuned_models`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListTunedModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tuned_models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTunedModels`` requests and continue to iterate + through the ``tuned_models`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListTunedModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[model_service.ListTunedModelsResponse]], + request: model_service.ListTunedModelsRequest, + response: model_service.ListTunedModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListTunedModelsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListTunedModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListTunedModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListTunedModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[tuned_model.TunedModel]: + async def async_generator(): + async for page in self.pages: + for response in page.tuned_models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/__init__.py new file mode 100644 index 000000000000..1b430a25489e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelServiceTransport +from .grpc import ModelServiceGrpcTransport +from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .rest import ModelServiceRestInterceptor, ModelServiceRestTransport + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] +_transport_registry["grpc"] = ModelServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport +_transport_registry["rest"] = ModelServiceRestTransport + +__all__ = ( + "ModelServiceTransport", + "ModelServiceGrpcTransport", + "ModelServiceGrpcAsyncIOTransport", + "ModelServiceRestTransport", + "ModelServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/base.py new file mode 100644 index 000000000000..1e6ab506236e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/base.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
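The ``ListModelsPager`` / ``ListTunedModelsPager`` classes above fetch follow-up pages lazily as the caller iterates. A minimal usage sketch of the synchronous pager, assuming ambient credentials are configured and that the client surface generated earlier in this patch is importable:

.. code-block:: python

    from google.ai import generativelanguage_v1beta

    client = generativelanguage_v1beta.ModelServiceClient()

    # Iterating the pager yields TunedModel messages; additional pages are
    # requested transparently whenever next_page_token is set on a response.
    for tuned in client.list_tuned_models(page_size=5):
        print(tuned.name)

    # The ``pages`` property exposes whole ListTunedModelsResponse objects instead.
    for page in client.list_tuned_models(page_size=5).pages:
        print(len(page.tuned_models))

The ``page_size`` of 5 is illustrative only; the service caps the page size as described in the request docstring.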
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1, operations_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class ModelServiceTransport(abc.ABC): + """Abstract transport class for ModelService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_tuned_model: gapic_v1.method.wrap_method( + self.get_tuned_model, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_tuned_models: gapic_v1.method.wrap_method( + self.list_tuned_models, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_tuned_model: gapic_v1.method.wrap_method( + self.create_tuned_model, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_tuned_model: gapic_v1.method.wrap_method( + self.update_tuned_model, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_tuned_model: gapic_v1.method.wrap_method( + self.delete_tuned_model, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + 
"""Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def get_model( + self, + ) -> Callable[ + [model_service.GetModelRequest], Union[model.Model, Awaitable[model.Model]] + ]: + raise NotImplementedError() + + @property + def list_models( + self, + ) -> Callable[ + [model_service.ListModelsRequest], + Union[ + model_service.ListModelsResponse, + Awaitable[model_service.ListModelsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_tuned_model( + self, + ) -> Callable[ + [model_service.GetTunedModelRequest], + Union[tuned_model.TunedModel, Awaitable[tuned_model.TunedModel]], + ]: + raise NotImplementedError() + + @property + def list_tuned_models( + self, + ) -> Callable[ + [model_service.ListTunedModelsRequest], + Union[ + model_service.ListTunedModelsResponse, + Awaitable[model_service.ListTunedModelsResponse], + ], + ]: + raise NotImplementedError() + + @property + def create_tuned_model( + self, + ) -> Callable[ + [model_service.CreateTunedModelRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_tuned_model( + self, + ) -> Callable[ + [model_service.UpdateTunedModelRequest], + Union[gag_tuned_model.TunedModel, Awaitable[gag_tuned_model.TunedModel]], + ]: + raise NotImplementedError() + + @property + def delete_tuned_model( + self, + ) -> Callable[ + [model_service.DeleteTunedModelRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("ModelServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc.py new file mode 100644 index 000000000000..a6cb0699799a --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc.py @@ -0,0 +1,446 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers, operations_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + +from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport + + +class ModelServiceGrpcTransport(ModelServiceTransport): + """gRPC backend transport for ModelService. + + Provides methods for getting metadata information about + Generative Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets information about a specific Model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/GetModel", + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists models available through the API. 
+ + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/ListModels", + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + @property + def get_tuned_model( + self, + ) -> Callable[[model_service.GetTunedModelRequest], tuned_model.TunedModel]: + r"""Return a callable for the get tuned model method over gRPC. + + Gets information about a specific TunedModel. + + Returns: + Callable[[~.GetTunedModelRequest], + ~.TunedModel]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tuned_model" not in self._stubs: + self._stubs["get_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/GetTunedModel", + request_serializer=model_service.GetTunedModelRequest.serialize, + response_deserializer=tuned_model.TunedModel.deserialize, + ) + return self._stubs["get_tuned_model"] + + @property + def list_tuned_models( + self, + ) -> Callable[ + [model_service.ListTunedModelsRequest], model_service.ListTunedModelsResponse + ]: + r"""Return a callable for the list tuned models method over gRPC. + + Lists tuned models owned by the user. + + Returns: + Callable[[~.ListTunedModelsRequest], + ~.ListTunedModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tuned_models" not in self._stubs: + self._stubs["list_tuned_models"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/ListTunedModels", + request_serializer=model_service.ListTunedModelsRequest.serialize, + response_deserializer=model_service.ListTunedModelsResponse.deserialize, + ) + return self._stubs["list_tuned_models"] + + @property + def create_tuned_model( + self, + ) -> Callable[[model_service.CreateTunedModelRequest], operations_pb2.Operation]: + r"""Return a callable for the create tuned model method over gRPC. + + Creates a tuned model. Intermediate tuning progress (if any) is + accessed through the [google.longrunning.Operations] service. + + Status and results can be accessed through the Operations + service. Example: GET + /v1/tunedModels/az2mb0bpw6i/operations/000-111-222 + + Returns: + Callable[[~.CreateTunedModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_tuned_model" not in self._stubs: + self._stubs["create_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/CreateTunedModel", + request_serializer=model_service.CreateTunedModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_tuned_model"] + + @property + def update_tuned_model( + self, + ) -> Callable[[model_service.UpdateTunedModelRequest], gag_tuned_model.TunedModel]: + r"""Return a callable for the update tuned model method over gRPC. + + Updates a tuned model. + + Returns: + Callable[[~.UpdateTunedModelRequest], + ~.TunedModel]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tuned_model" not in self._stubs: + self._stubs["update_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/UpdateTunedModel", + request_serializer=model_service.UpdateTunedModelRequest.serialize, + response_deserializer=gag_tuned_model.TunedModel.deserialize, + ) + return self._stubs["update_tuned_model"] + + @property + def delete_tuned_model( + self, + ) -> Callable[[model_service.DeleteTunedModelRequest], empty_pb2.Empty]: + r"""Return a callable for the delete tuned model method over gRPC. + + Deletes a tuned model. + + Returns: + Callable[[~.DeleteTunedModelRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tuned_model" not in self._stubs: + self._stubs["delete_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/DeleteTunedModel", + request_serializer=model_service.DeleteTunedModelRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_tuned_model"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("ModelServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..f8e65fe0fb83 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/grpc_asyncio.py @@ -0,0 +1,458 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
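``ModelServiceGrpcTransport`` is also what the transport registry in ``transports/__init__.py`` returns for the ``"grpc"`` key, so callers can either pick a transport by name or construct one around an existing channel. A sketch under those assumptions (the channel options shown are illustrative, and ambient credentials are assumed):

.. code-block:: python

    from google.ai import generativelanguage_v1beta
    from google.ai.generativelanguage_v1beta.services.model_service.transports import (
        ModelServiceGrpcTransport,
    )

    # Select a registered transport by name ("grpc", "grpc_asyncio" or "rest").
    rest_client = generativelanguage_v1beta.ModelServiceClient(transport="rest")

    # Or build the gRPC transport explicitly, e.g. to control channel options.
    # When a channel is passed in, the transport ignores any credentials arguments.
    channel = ModelServiceGrpcTransport.create_channel(
        "generativelanguage.googleapis.com:443",
        options=[("grpc.max_receive_message_length", -1)],
    )
    grpc_client = generativelanguage_v1beta.ModelServiceClient(
        transport=ModelServiceGrpcTransport(channel=channel)
    )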
+# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async, operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + +from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport +from .grpc import ModelServiceGrpcTransport + + +class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): + """gRPC AsyncIO backend transport for ModelService. + + Provides methods for getting metadata information about + Generative Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def get_model( + self, + ) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets information about a specific Model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/GetModel", + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[ + [model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse] + ]: + r"""Return a callable for the list models method over gRPC. + + Lists models available through the API. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/ListModels", + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + @property + def get_tuned_model( + self, + ) -> Callable[ + [model_service.GetTunedModelRequest], Awaitable[tuned_model.TunedModel] + ]: + r"""Return a callable for the get tuned model method over gRPC. + + Gets information about a specific TunedModel. + + Returns: + Callable[[~.GetTunedModelRequest], + Awaitable[~.TunedModel]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tuned_model" not in self._stubs: + self._stubs["get_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/GetTunedModel", + request_serializer=model_service.GetTunedModelRequest.serialize, + response_deserializer=tuned_model.TunedModel.deserialize, + ) + return self._stubs["get_tuned_model"] + + @property + def list_tuned_models( + self, + ) -> Callable[ + [model_service.ListTunedModelsRequest], + Awaitable[model_service.ListTunedModelsResponse], + ]: + r"""Return a callable for the list tuned models method over gRPC. + + Lists tuned models owned by the user. + + Returns: + Callable[[~.ListTunedModelsRequest], + Awaitable[~.ListTunedModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tuned_models" not in self._stubs: + self._stubs["list_tuned_models"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/ListTunedModels", + request_serializer=model_service.ListTunedModelsRequest.serialize, + response_deserializer=model_service.ListTunedModelsResponse.deserialize, + ) + return self._stubs["list_tuned_models"] + + @property + def create_tuned_model( + self, + ) -> Callable[ + [model_service.CreateTunedModelRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create tuned model method over gRPC. + + Creates a tuned model. 
Intermediate tuning progress (if any) is + accessed through the [google.longrunning.Operations] service. + + Status and results can be accessed through the Operations + service. Example: GET + /v1/tunedModels/az2mb0bpw6i/operations/000-111-222 + + Returns: + Callable[[~.CreateTunedModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tuned_model" not in self._stubs: + self._stubs["create_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/CreateTunedModel", + request_serializer=model_service.CreateTunedModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_tuned_model"] + + @property + def update_tuned_model( + self, + ) -> Callable[ + [model_service.UpdateTunedModelRequest], Awaitable[gag_tuned_model.TunedModel] + ]: + r"""Return a callable for the update tuned model method over gRPC. + + Updates a tuned model. + + Returns: + Callable[[~.UpdateTunedModelRequest], + Awaitable[~.TunedModel]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tuned_model" not in self._stubs: + self._stubs["update_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/UpdateTunedModel", + request_serializer=model_service.UpdateTunedModelRequest.serialize, + response_deserializer=gag_tuned_model.TunedModel.deserialize, + ) + return self._stubs["update_tuned_model"] + + @property + def delete_tuned_model( + self, + ) -> Callable[[model_service.DeleteTunedModelRequest], Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete tuned model method over gRPC. + + Deletes a tuned model. + + Returns: + Callable[[~.DeleteTunedModelRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_tuned_model" not in self._stubs: + self._stubs["delete_tuned_model"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.ModelService/DeleteTunedModel", + request_serializer=model_service.DeleteTunedModelRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_tuned_model"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("ModelServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/rest.py new file mode 100644 index 000000000000..73c0afe522ff --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/model_service/transports/rest.py @@ -0,0 +1,1082 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import ( + gapic_v1, + operations_v1, + path_template, + rest_helpers, + rest_streaming, +) +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import ModelServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ModelServiceRestInterceptor: + """Interceptor for ModelService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ModelServiceRestTransport. + + .. 
code-block:: python + class MyCustomModelServiceInterceptor(ModelServiceRestInterceptor): + def pre_create_tuned_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_tuned_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_tuned_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_tuned_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_tuned_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_models(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_models(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_tuned_models(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_tuned_models(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_tuned_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_tuned_model(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ModelServiceRestTransport(interceptor=MyCustomModelServiceInterceptor()) + client = ModelServiceClient(transport=transport) + + + """ + + def pre_create_tuned_model( + self, + request: model_service.CreateTunedModelRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.CreateTunedModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_tuned_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_create_tuned_model( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_tuned_model + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_delete_tuned_model( + self, + request: model_service.DeleteTunedModelRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.DeleteTunedModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_tuned_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def pre_get_model( + self, + request: model_service.GetModelRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.GetModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. 
+ """ + return request, metadata + + def post_get_model(self, response: model.Model) -> model.Model: + """Post-rpc interceptor for get_model + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_get_tuned_model( + self, + request: model_service.GetTunedModelRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.GetTunedModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_tuned_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_get_tuned_model( + self, response: tuned_model.TunedModel + ) -> tuned_model.TunedModel: + """Post-rpc interceptor for get_tuned_model + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_list_models( + self, + request: model_service.ListModelsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.ListModelsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_models + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_list_models( + self, response: model_service.ListModelsResponse + ) -> model_service.ListModelsResponse: + """Post-rpc interceptor for list_models + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_list_tuned_models( + self, + request: model_service.ListTunedModelsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.ListTunedModelsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_tuned_models + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_list_tuned_models( + self, response: model_service.ListTunedModelsResponse + ) -> model_service.ListTunedModelsResponse: + """Post-rpc interceptor for list_tuned_models + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + def pre_update_tuned_model( + self, + request: model_service.UpdateTunedModelRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[model_service.UpdateTunedModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_tuned_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_update_tuned_model( + self, response: gag_tuned_model.TunedModel + ) -> gag_tuned_model.TunedModel: + """Post-rpc interceptor for update_tuned_model + + Override in a subclass to manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class ModelServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ModelServiceRestInterceptor + + +class ModelServiceRestTransport(ModelServiceTransport): + """REST backend transport for ModelService. 
+ + Provides methods for getting metadata information about + Generative Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ModelServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or ModelServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            http_options: Dict[str, List[Dict[str, str]]] = {}
+
+            rest_transport = operations_v1.OperationsRestTransport(
+                host=self._host,
+                # use the credentials which are saved
+                credentials=self._credentials,
+                scopes=self._scopes,
+                http_options=http_options,
+                path_prefix="v1beta",
+            )
+
+            self._operations_client = operations_v1.AbstractOperationsClient(
+                transport=rest_transport
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    class _CreateTunedModel(ModelServiceRestStub):
+        def __hash__(self):
+            return hash("CreateTunedModel")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: model_service.CreateTunedModelRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> operations_pb2.Operation:
+            r"""Call the create tuned model method over HTTP.
+
+            Args:
+                request (~.model_service.CreateTunedModelRequest):
+                    The request object. Request to create a TunedModel.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.operations_pb2.Operation:
+                    This resource represents a
+                long-running operation that is the
+                result of a network API call.
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/tunedModels", + "body": "tuned_model", + }, + ] + request, metadata = self._interceptor.pre_create_tuned_model( + request, metadata + ) + pb_request = model_service.CreateTunedModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_tuned_model(resp) + return resp + + class _DeleteTunedModel(ModelServiceRestStub): + def __hash__(self): + return hash("DeleteTunedModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_service.DeleteTunedModelRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete tuned model method over HTTP. + + Args: + request (~.model_service.DeleteTunedModelRequest): + The request object. Request to delete a TunedModel. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta/{name=tunedModels/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_tuned_model( + request, metadata + ) + pb_request = model_service.DeleteTunedModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetModel(ModelServiceRestStub): + def __hash__(self): + return hash("GetModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_service.GetModelRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Call the get model method over HTTP. + + Args: + request (~.model_service.GetModelRequest): + The request object. Request for getting information about + a specific Model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + Information about a Generative + Language Model. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=models/*}", + }, + ] + request, metadata = self._interceptor.pre_get_model(request, metadata) + pb_request = model_service.GetModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model.Model() + pb_resp = model.Model.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model(resp) + return resp + + class _GetTunedModel(ModelServiceRestStub): + def __hash__(self): + return hash("GetTunedModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_service.GetTunedModelRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tuned_model.TunedModel: + r"""Call the get tuned model method over HTTP. + + Args: + request (~.model_service.GetTunedModelRequest): + The request object. Request for getting information about + a specific Model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.tuned_model.TunedModel: + A fine-tuned model created using + ModelService.CreateTunedModel. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=tunedModels/*}", + }, + ] + request, metadata = self._interceptor.pre_get_tuned_model(request, metadata) + pb_request = model_service.GetTunedModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = tuned_model.TunedModel() + pb_resp = tuned_model.TunedModel.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_tuned_model(resp) + return resp + + class _ListModels(ModelServiceRestStub): + def __hash__(self): + return hash("ListModels") + + def __call__( + self, + request: model_service.ListModelsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_service.ListModelsResponse: + r"""Call the list models method over HTTP. + + Args: + request (~.model_service.ListModelsRequest): + The request object. Request for listing all Models. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_service.ListModelsResponse: + Response from ``ListModel`` containing a paginated list + of Models. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/models", + }, + ] + request, metadata = self._interceptor.pre_list_models(request, metadata) + pb_request = model_service.ListModelsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_service.ListModelsResponse() + pb_resp = model_service.ListModelsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_models(resp) + return resp + + class _ListTunedModels(ModelServiceRestStub): + def __hash__(self): + return hash("ListTunedModels") + + def __call__( + self, + request: model_service.ListTunedModelsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_service.ListTunedModelsResponse: + r"""Call the list tuned models method over HTTP. + + Args: + request (~.model_service.ListTunedModelsRequest): + The request object. Request for listing TunedModels. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_service.ListTunedModelsResponse: + Response from ``ListTunedModels`` containing a paginated + list of Models. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/tunedModels", + }, + ] + request, metadata = self._interceptor.pre_list_tuned_models( + request, metadata + ) + pb_request = model_service.ListTunedModelsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_service.ListTunedModelsResponse() + pb_resp = model_service.ListTunedModelsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tuned_models(resp) + return resp + + class _UpdateTunedModel(ModelServiceRestStub): + def __hash__(self): + return hash("UpdateTunedModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_service.UpdateTunedModelRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_tuned_model.TunedModel: + r"""Call the update tuned model method over HTTP. + + Args: + request (~.model_service.UpdateTunedModelRequest): + The request object. Request to update a TunedModel. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gag_tuned_model.TunedModel: + A fine-tuned model created using + ModelService.CreateTunedModel. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta/{tuned_model.name=tunedModels/*}", + "body": "tuned_model", + }, + ] + request, metadata = self._interceptor.pre_update_tuned_model( + request, metadata + ) + pb_request = model_service.UpdateTunedModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gag_tuned_model.TunedModel() + pb_resp = gag_tuned_model.TunedModel.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_tuned_model(resp) + return resp + + @property + def create_tuned_model( + self, + ) -> Callable[[model_service.CreateTunedModelRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateTunedModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_tuned_model( + self, + ) -> Callable[[model_service.DeleteTunedModelRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteTunedModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_tuned_model( + self, + ) -> Callable[[model_service.GetTunedModelRequest], tuned_model.TunedModel]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetTunedModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_models( + self, + ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListModels(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_tuned_models( + self, + ) -> Callable[ + [model_service.ListTunedModelsRequest], model_service.ListTunedModelsResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTunedModels(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_tuned_model( + self, + ) -> Callable[[model_service.UpdateTunedModelRequest], gag_tuned_model.TunedModel]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateTunedModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("ModelServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/__init__.py new file mode 100644 index 000000000000..7cd02e1fc232 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import PermissionServiceAsyncClient +from .client import PermissionServiceClient + +__all__ = ( + "PermissionServiceClient", + "PermissionServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/async_client.py new file mode 100644 index 000000000000..45bf05885ea0 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/async_client.py @@ -0,0 +1,971 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
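
The REST transport above is wired the same way the ``ModelServiceRestInterceptor`` docstring sketches it: subclass the interceptor, pass it to ``ModelServiceRestTransport``, and hand the transport to the client. A minimal sketch under those assumptions follows; the ``LoggingInterceptor`` class and the printed fields are illustrative rather than part of the generated code, and application default credentials are assumed to be configured.

import logging

from google.ai.generativelanguage_v1beta.services.model_service import ModelServiceClient
from google.ai.generativelanguage_v1beta.services.model_service.transports.rest import (
    ModelServiceRestInterceptor,
    ModelServiceRestTransport,
)


class LoggingInterceptor(ModelServiceRestInterceptor):
    # Hook only the ListModels callbacks; every other RPC falls through to the defaults.
    def pre_list_models(self, request, metadata):
        logging.info("ListModels request: %s", request)
        return request, metadata

    def post_list_models(self, response):
        logging.info("ListModels returned %d models", len(response.models))
        return response


# Illustrative wiring (not part of this patch): the interceptor goes into the REST
# transport, the transport goes into the client; default credentials are required.
transport = ModelServiceRestTransport(interceptor=LoggingInterceptor())
client = ModelServiceClient(transport=transport)
for m in client.list_models():
    print(m.name)

Because every REST call funnels through the matching ``pre_*``/``post_*`` hooks, the interceptor is the intended seam for logging, request validation, or response scrubbing without editing the generated stubs.
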
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.services.permission_service import pagers +from google.ai.generativelanguage_v1beta.types import permission as gag_permission +from google.ai.generativelanguage_v1beta.types import permission +from google.ai.generativelanguage_v1beta.types import permission_service + +from .client import PermissionServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, PermissionServiceTransport +from .transports.grpc_asyncio import PermissionServiceGrpcAsyncIOTransport + + +class PermissionServiceAsyncClient: + """Provides methods for managing permissions to PaLM API + resources. + """ + + _client: PermissionServiceClient + + DEFAULT_ENDPOINT = PermissionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PermissionServiceClient.DEFAULT_MTLS_ENDPOINT + + permission_path = staticmethod(PermissionServiceClient.permission_path) + parse_permission_path = staticmethod(PermissionServiceClient.parse_permission_path) + common_billing_account_path = staticmethod( + PermissionServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + PermissionServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(PermissionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + PermissionServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + PermissionServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + PermissionServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(PermissionServiceClient.common_project_path) + parse_common_project_path = staticmethod( + PermissionServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(PermissionServiceClient.common_location_path) + parse_common_location_path = staticmethod( + PermissionServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PermissionServiceAsyncClient: The constructed client. 
+ """ + return PermissionServiceClient.from_service_account_info.__func__(PermissionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PermissionServiceAsyncClient: The constructed client. + """ + return PermissionServiceClient.from_service_account_file.__func__(PermissionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return PermissionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> PermissionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PermissionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(PermissionServiceClient).get_transport_class, type(PermissionServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PermissionServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the permission service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PermissionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. 
It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PermissionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_permission( + self, + request: Optional[ + Union[permission_service.CreatePermissionRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + permission: Optional[gag_permission.Permission] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_permission.Permission: + r"""Create a permission to a specific resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_create_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreatePermissionRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_permission(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CreatePermissionRequest, dict]]): + The request object. Request to create a ``Permission``. + parent (:class:`str`): + Required. The parent resource of the ``Permission``. + Formats: ``tunedModels/{tuned_model}`` + ``corpora/{corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permission (:class:`google.ai.generativelanguage_v1beta.types.Permission`): + Required. The permission to create. + This corresponds to the ``permission`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.ai.generativelanguage_v1beta.types.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. + tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, permission]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = permission_service.CreatePermissionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if permission is not None: + request.permission = permission + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_permission, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_permission( + self, + request: Optional[Union[permission_service.GetPermissionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission.Permission: + r"""Gets information about a specific Permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_get_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetPermissionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_permission(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GetPermissionRequest, dict]]): + The request object. Request for getting information about a specific + ``Permission``. + name (:class:`str`): + Required. The resource name of the permission. + + Formats: + ``tunedModels/{tuned_model}/permissions/{permission}`` + ``corpora/{corpus}/permissions/{permission}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. + tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = permission_service.GetPermissionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_permission, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
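+        # Note: the wrapper above retries ServiceUnavailable with exponential
+        # backoff (1s initial, 10s max, x1.3) up to a 60s deadline; an explicit
+        # `retry` or `timeout` argument passed by the caller takes precedence.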
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_permissions( + self, + request: Optional[ + Union[permission_service.ListPermissionsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPermissionsAsyncPager: + r"""Lists permissions for the specific resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_list_permissions(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListPermissionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_permissions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.ListPermissionsRequest, dict]]): + The request object. Request for listing permissions. + parent (:class:`str`): + Required. The parent resource of the permissions. + Formats: ``tunedModels/{tuned_model}`` + ``corpora/{corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.permission_service.pagers.ListPermissionsAsyncPager: + Response from ListPermissions containing a paginated list of + permissions. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = permission_service.ListPermissionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
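+        # Note: unlike the other methods, list_permissions is wrapped with
+        # default_timeout=None and no default retry, so the call is only
+        # retried when the caller passes an explicit `retry` argument.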
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPermissionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_permission( + self, + request: Optional[ + Union[permission_service.UpdatePermissionRequest, dict] + ] = None, + *, + permission: Optional[gag_permission.Permission] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_permission.Permission: + r"""Updates the permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_update_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdatePermissionRequest( + ) + + # Make the request + response = await client.update_permission(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.UpdatePermissionRequest, dict]]): + The request object. Request to update the ``Permission``. + permission (:class:`google.ai.generativelanguage_v1beta.types.Permission`): + Required. The permission to update. + + The permission's ``name`` field is used to identify the + permission to update. + + This corresponds to the ``permission`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to update. Accepted ones: + + - role (``Permission.role`` field) + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. 
+ tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([permission, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = permission_service.UpdatePermissionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if permission is not None: + request.permission = permission + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_permission, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("permission.name", request.permission.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_permission( + self, + request: Optional[ + Union[permission_service.DeletePermissionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_delete_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeletePermissionRequest( + name="name_value", + ) + + # Make the request + await client.delete_permission(request=request) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.DeletePermissionRequest, dict]]): + The request object. Request to delete the ``Permission``. + name (:class:`str`): + Required. The resource name of the permission. Formats: + ``tunedModels/{tuned_model}/permissions/{permission}`` + ``corpora/{corpus}/permissions/{permission}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = permission_service.DeletePermissionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_permission, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def transfer_ownership( + self, + request: Optional[ + Union[permission_service.TransferOwnershipRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission_service.TransferOwnershipResponse: + r"""Transfers ownership of the tuned model. + This is the only way to change ownership of the tuned + model. The current owner will be downgraded to writer + role. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_transfer_ownership(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.TransferOwnershipRequest( + name="name_value", + email_address="email_address_value", + ) + + # Make the request + response = await client.transfer_ownership(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.TransferOwnershipRequest, dict]]): + The request object. Request to transfer the ownership of + the tuned model. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.TransferOwnershipResponse: + Response from TransferOwnership. + """ + # Create or coerce a protobuf request object. 
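+        # transfer_ownership exposes no flattened fields, so the value passed
+        # in (a TransferOwnershipRequest or dict) is coerced directly, without
+        # the flattened-parameter check used by the other methods.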
+ request = permission_service.TransferOwnershipRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.transfer_ownership, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "PermissionServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("PermissionServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/client.py new file mode 100644 index 000000000000..ffe293de8c42 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/client.py @@ -0,0 +1,1160 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import os
+import re
+from typing import (
+    Dict,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.ai.generativelanguage_v1beta import gapic_version as package_version
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+
+from google.ai.generativelanguage_v1beta.services.permission_service import pagers
+from google.ai.generativelanguage_v1beta.types import permission as gag_permission
+from google.ai.generativelanguage_v1beta.types import permission
+from google.ai.generativelanguage_v1beta.types import permission_service
+
+from .transports.base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
+from .transports.grpc import PermissionServiceGrpcTransport
+from .transports.grpc_asyncio import PermissionServiceGrpcAsyncIOTransport
+from .transports.rest import PermissionServiceRestTransport
+
+
+class PermissionServiceClientMeta(type):
+    """Metaclass for the PermissionService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[PermissionServiceTransport]]
+    _transport_registry["grpc"] = PermissionServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = PermissionServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = PermissionServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[PermissionServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class PermissionServiceClient(metaclass=PermissionServiceClientMeta):
+    """Provides methods for managing permissions to PaLM API
+    resources.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "generativelanguage.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PermissionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PermissionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PermissionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PermissionServiceTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def permission_path(
+        tuned_model: str,
+        permission: str,
+    ) -> str:
+        """Returns a fully-qualified permission string."""
+        return "tunedModels/{tuned_model}/permissions/{permission}".format(
+            tuned_model=tuned_model,
+            permission=permission,
+        )
+
+    @staticmethod
+    def parse_permission_path(path: str) -> Dict[str, str]:
+        """Parses a permission path into its component segments."""
+        m = re.match(
+            r"^tunedModels/(?P<tuned_model>.+?)/permissions/(?P<permission>.+?)$", path
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` if provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, PermissionServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the permission service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PermissionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PermissionServiceTransport): + # transport is a PermissionServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_permission( + self, + request: Optional[ + Union[permission_service.CreatePermissionRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + permission: Optional[gag_permission.Permission] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_permission.Permission: + r"""Create a permission to a specific resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_create_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreatePermissionRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_permission(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CreatePermissionRequest, dict]): + The request object. Request to create a ``Permission``. + parent (str): + Required. The parent resource of the ``Permission``. + Formats: ``tunedModels/{tuned_model}`` + ``corpora/{corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permission (google.ai.generativelanguage_v1beta.types.Permission): + Required. The permission to create. + This corresponds to the ``permission`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. + tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, permission]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a permission_service.CreatePermissionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, permission_service.CreatePermissionRequest): + request = permission_service.CreatePermissionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if permission is not None: + request.permission = permission + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
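+        # The sync client looks up the method already wrapped by the transport
+        # instead of wrapping it per call as the async client does.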
+ rpc = self._transport._wrapped_methods[self._transport.create_permission] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_permission( + self, + request: Optional[Union[permission_service.GetPermissionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission.Permission: + r"""Gets information about a specific Permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_get_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetPermissionRequest( + name="name_value", + ) + + # Make the request + response = client.get_permission(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GetPermissionRequest, dict]): + The request object. Request for getting information about a specific + ``Permission``. + name (str): + Required. The resource name of the permission. + + Formats: + ``tunedModels/{tuned_model}/permissions/{permission}`` + ``corpora/{corpus}/permissions/{permission}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. + tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
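+        # `name` is the only flattened field on GetPermissionRequest, so it is
+        # the only value checked here.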
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a permission_service.GetPermissionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, permission_service.GetPermissionRequest): + request = permission_service.GetPermissionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_permission] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_permissions( + self, + request: Optional[ + Union[permission_service.ListPermissionsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPermissionsPager: + r"""Lists permissions for the specific resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_list_permissions(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListPermissionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_permissions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.ListPermissionsRequest, dict]): + The request object. Request for listing permissions. + parent (str): + Required. The parent resource of the permissions. + Formats: ``tunedModels/{tuned_model}`` + ``corpora/{corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.permission_service.pagers.ListPermissionsPager: + Response from ListPermissions containing a paginated list of + permissions. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
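+        # A dict may be passed in place of a ListPermissionsRequest; it is
+        # coerced below, and the returned pager drives the page_token loop.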
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a permission_service.ListPermissionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, permission_service.ListPermissionsRequest): + request = permission_service.ListPermissionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPermissionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_permission( + self, + request: Optional[ + Union[permission_service.UpdatePermissionRequest, dict] + ] = None, + *, + permission: Optional[gag_permission.Permission] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_permission.Permission: + r"""Updates the permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_update_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdatePermissionRequest( + ) + + # Make the request + response = client.update_permission(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.UpdatePermissionRequest, dict]): + The request object. Request to update the ``Permission``. + permission (google.ai.generativelanguage_v1beta.types.Permission): + Required. The permission to update. + + The permission's ``name`` field is used to identify the + permission to update. + + This corresponds to the ``permission`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. 
Accepted ones: + + - role (``Permission.role`` field) + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. + tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([permission, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a permission_service.UpdatePermissionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, permission_service.UpdatePermissionRequest): + request = permission_service.UpdatePermissionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if permission is not None: + request.permission = permission + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_permission] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("permission.name", request.permission.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_permission( + self, + request: Optional[ + Union[permission_service.DeletePermissionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_delete_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeletePermissionRequest( + name="name_value", + ) + + # Make the request + client.delete_permission(request=request) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.DeletePermissionRequest, dict]): + The request object. Request to delete the ``Permission``. + name (str): + Required. The resource name of the permission. Formats: + ``tunedModels/{tuned_model}/permissions/{permission}`` + ``corpora/{corpus}/permissions/{permission}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a permission_service.DeletePermissionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, permission_service.DeletePermissionRequest): + request = permission_service.DeletePermissionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_permission] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def transfer_ownership( + self, + request: Optional[ + Union[permission_service.TransferOwnershipRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission_service.TransferOwnershipResponse: + r"""Transfers ownership of the tuned model. + This is the only way to change ownership of the tuned + model. The current owner will be downgraded to writer + role. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_transfer_ownership(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.TransferOwnershipRequest( + name="name_value", + email_address="email_address_value", + ) + + # Make the request + response = client.transfer_ownership(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.TransferOwnershipRequest, dict]): + The request object. Request to transfer the ownership of + the tuned model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.TransferOwnershipResponse: + Response from TransferOwnership. + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a permission_service.TransferOwnershipRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, permission_service.TransferOwnershipRequest): + request = permission_service.TransferOwnershipRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.transfer_ownership] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "PermissionServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("PermissionServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/pagers.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/pagers.py new file mode 100644 index 000000000000..f5c60ec9ea8d --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/pagers.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, +) + +from google.ai.generativelanguage_v1beta.types import permission, permission_service + + +class ListPermissionsPager: + """A pager for iterating through ``list_permissions`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListPermissionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``permissions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPermissions`` requests and continue to iterate + through the ``permissions`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListPermissionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., permission_service.ListPermissionsResponse], + request: permission_service.ListPermissionsRequest, + response: permission_service.ListPermissionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListPermissionsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListPermissionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = permission_service.ListPermissionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[permission_service.ListPermissionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[permission.Permission]: + for page in self.pages: + yield from page.permissions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPermissionsAsyncPager: + """A pager for iterating through ``list_permissions`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListPermissionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``permissions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPermissions`` requests and continue to iterate + through the ``permissions`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListPermissionsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[permission_service.ListPermissionsResponse]], + request: permission_service.ListPermissionsRequest, + response: permission_service.ListPermissionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListPermissionsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListPermissionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = permission_service.ListPermissionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[permission_service.ListPermissionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[permission.Permission]: + async def async_generator(): + async for page in self.pages: + for response in page.permissions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/__init__.py new file mode 100644 index 000000000000..fe33568492a6 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PermissionServiceTransport +from .grpc import PermissionServiceGrpcTransport +from .grpc_asyncio import PermissionServiceGrpcAsyncIOTransport +from .rest import PermissionServiceRestInterceptor, PermissionServiceRestTransport + +# Compile a registry of transports. 
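# The registry below maps a transport name to its class; this is what lets a
# string such as ``transport="rest"`` passed to the client resolve to a
# concrete transport. A minimal sketch of that lookup (illustrative only, and
# assuming ``PermissionServiceClient`` has been imported from the parent
# package):
#
#     transport_cls = _transport_registry["rest"]   # PermissionServiceRestTransport
#     client = PermissionServiceClient(
#         transport=transport_cls(host="generativelanguage.googleapis.com")
#     )
#
# In generated clients this lookup normally happens on the client metaclass,
# so callers usually just pass the transport name as a string.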
+_transport_registry = OrderedDict() # type: Dict[str, Type[PermissionServiceTransport]] +_transport_registry["grpc"] = PermissionServiceGrpcTransport +_transport_registry["grpc_asyncio"] = PermissionServiceGrpcAsyncIOTransport +_transport_registry["rest"] = PermissionServiceRestTransport + +__all__ = ( + "PermissionServiceTransport", + "PermissionServiceGrpcTransport", + "PermissionServiceGrpcAsyncIOTransport", + "PermissionServiceRestTransport", + "PermissionServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/base.py new file mode 100644 index 000000000000..b0b11618fb9a --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/base.py @@ -0,0 +1,280 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version +from google.ai.generativelanguage_v1beta.types import permission as gag_permission +from google.ai.generativelanguage_v1beta.types import permission +from google.ai.generativelanguage_v1beta.types import permission_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class PermissionServiceTransport(abc.ABC): + """Abstract transport class for PermissionService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
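        # Note: each RPC below is bound to its default retry and timeout
        # policy, so callers get sensible behavior without passing
        # ``retry``/``timeout`` explicitly. Most methods retry
        # ``ServiceUnavailable`` with exponential backoff (1.0s initial delay,
        # 1.3x multiplier, capped at 10s between attempts) under a 60s
        # deadline; ``list_permissions`` is the exception and carries no
        # default retry or timeout.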
+ self._wrapped_methods = { + self.create_permission: gapic_v1.method.wrap_method( + self.create_permission, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_permission: gapic_v1.method.wrap_method( + self.get_permission, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_permissions: gapic_v1.method.wrap_method( + self.list_permissions, + default_timeout=None, + client_info=client_info, + ), + self.update_permission: gapic_v1.method.wrap_method( + self.update_permission, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_permission: gapic_v1.method.wrap_method( + self.delete_permission, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.transfer_ownership: gapic_v1.method.wrap_method( + self.transfer_ownership, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def create_permission( + self, + ) -> Callable[ + [permission_service.CreatePermissionRequest], + Union[gag_permission.Permission, Awaitable[gag_permission.Permission]], + ]: + raise NotImplementedError() + + @property + def get_permission( + self, + ) -> Callable[ + [permission_service.GetPermissionRequest], + Union[permission.Permission, Awaitable[permission.Permission]], + ]: + raise NotImplementedError() + + @property + def list_permissions( + self, + ) -> Callable[ + [permission_service.ListPermissionsRequest], + Union[ + permission_service.ListPermissionsResponse, + Awaitable[permission_service.ListPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_permission( + self, + ) -> Callable[ + [permission_service.UpdatePermissionRequest], + Union[gag_permission.Permission, Awaitable[gag_permission.Permission]], + ]: + raise NotImplementedError() + + @property + def delete_permission( + self, + ) -> Callable[ + [permission_service.DeletePermissionRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def transfer_ownership( + self, + ) -> Callable[ + [permission_service.TransferOwnershipRequest], + Union[ + permission_service.TransferOwnershipResponse, + Awaitable[permission_service.TransferOwnershipResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("PermissionServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc.py new file mode 100644 index 000000000000..6237599bb957 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1beta.types import permission as gag_permission +from google.ai.generativelanguage_v1beta.types import permission +from google.ai.generativelanguage_v1beta.types import permission_service + +from .base import DEFAULT_CLIENT_INFO, PermissionServiceTransport + + +class PermissionServiceGrpcTransport(PermissionServiceTransport): + """gRPC backend transport for PermissionService. 
+ + Provides methods for managing permissions to PaLM API + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def create_permission( + self, + ) -> Callable[ + [permission_service.CreatePermissionRequest], gag_permission.Permission + ]: + r"""Return a callable for the create permission method over gRPC. + + Create a permission to a specific resource. + + Returns: + Callable[[~.CreatePermissionRequest], + ~.Permission]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_permission" not in self._stubs: + self._stubs["create_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/CreatePermission", + request_serializer=permission_service.CreatePermissionRequest.serialize, + response_deserializer=gag_permission.Permission.deserialize, + ) + return self._stubs["create_permission"] + + @property + def get_permission( + self, + ) -> Callable[[permission_service.GetPermissionRequest], permission.Permission]: + r"""Return a callable for the get permission method over gRPC. + + Gets information about a specific Permission. + + Returns: + Callable[[~.GetPermissionRequest], + ~.Permission]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_permission" not in self._stubs: + self._stubs["get_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/GetPermission", + request_serializer=permission_service.GetPermissionRequest.serialize, + response_deserializer=permission.Permission.deserialize, + ) + return self._stubs["get_permission"] + + @property + def list_permissions( + self, + ) -> Callable[ + [permission_service.ListPermissionsRequest], + permission_service.ListPermissionsResponse, + ]: + r"""Return a callable for the list permissions method over gRPC. + + Lists permissions for the specific resource. + + Returns: + Callable[[~.ListPermissionsRequest], + ~.ListPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
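        # The callable is created once per transport instance and cached in
        # ``self._stubs``, so repeated property access reuses the same
        # ``grpc.UnaryUnaryMultiCallable`` bound to the shared channel.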
+ if "list_permissions" not in self._stubs: + self._stubs["list_permissions"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/ListPermissions", + request_serializer=permission_service.ListPermissionsRequest.serialize, + response_deserializer=permission_service.ListPermissionsResponse.deserialize, + ) + return self._stubs["list_permissions"] + + @property + def update_permission( + self, + ) -> Callable[ + [permission_service.UpdatePermissionRequest], gag_permission.Permission + ]: + r"""Return a callable for the update permission method over gRPC. + + Updates the permission. + + Returns: + Callable[[~.UpdatePermissionRequest], + ~.Permission]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_permission" not in self._stubs: + self._stubs["update_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/UpdatePermission", + request_serializer=permission_service.UpdatePermissionRequest.serialize, + response_deserializer=gag_permission.Permission.deserialize, + ) + return self._stubs["update_permission"] + + @property + def delete_permission( + self, + ) -> Callable[[permission_service.DeletePermissionRequest], empty_pb2.Empty]: + r"""Return a callable for the delete permission method over gRPC. + + Deletes the permission. + + Returns: + Callable[[~.DeletePermissionRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_permission" not in self._stubs: + self._stubs["delete_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/DeletePermission", + request_serializer=permission_service.DeletePermissionRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_permission"] + + @property + def transfer_ownership( + self, + ) -> Callable[ + [permission_service.TransferOwnershipRequest], + permission_service.TransferOwnershipResponse, + ]: + r"""Return a callable for the transfer ownership method over gRPC. + + Transfers ownership of the tuned model. + This is the only way to change ownership of the tuned + model. The current owner will be downgraded to writer + role. + + Returns: + Callable[[~.TransferOwnershipRequest], + ~.TransferOwnershipResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "transfer_ownership" not in self._stubs: + self._stubs["transfer_ownership"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/TransferOwnership", + request_serializer=permission_service.TransferOwnershipRequest.serialize, + response_deserializer=permission_service.TransferOwnershipResponse.deserialize, + ) + return self._stubs["transfer_ownership"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("PermissionServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..50a0f11a537a --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/grpc_asyncio.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1beta.types import permission as gag_permission +from google.ai.generativelanguage_v1beta.types import permission +from google.ai.generativelanguage_v1beta.types import permission_service + +from .base import DEFAULT_CLIENT_INFO, PermissionServiceTransport +from .grpc import PermissionServiceGrpcTransport + + +class PermissionServiceGrpcAsyncIOTransport(PermissionServiceTransport): + """gRPC AsyncIO backend transport for PermissionService. + + Provides methods for managing permissions to PaLM API + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. 
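        # The ``aio.Channel`` was created (or accepted from the caller) once
        # in ``__init__`` above; every stub defined below multiplexes its RPCs
        # over this single cached channel.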
+ return self._grpc_channel + + @property + def create_permission( + self, + ) -> Callable[ + [permission_service.CreatePermissionRequest], + Awaitable[gag_permission.Permission], + ]: + r"""Return a callable for the create permission method over gRPC. + + Create a permission to a specific resource. + + Returns: + Callable[[~.CreatePermissionRequest], + Awaitable[~.Permission]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_permission" not in self._stubs: + self._stubs["create_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/CreatePermission", + request_serializer=permission_service.CreatePermissionRequest.serialize, + response_deserializer=gag_permission.Permission.deserialize, + ) + return self._stubs["create_permission"] + + @property + def get_permission( + self, + ) -> Callable[ + [permission_service.GetPermissionRequest], Awaitable[permission.Permission] + ]: + r"""Return a callable for the get permission method over gRPC. + + Gets information about a specific Permission. + + Returns: + Callable[[~.GetPermissionRequest], + Awaitable[~.Permission]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_permission" not in self._stubs: + self._stubs["get_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/GetPermission", + request_serializer=permission_service.GetPermissionRequest.serialize, + response_deserializer=permission.Permission.deserialize, + ) + return self._stubs["get_permission"] + + @property + def list_permissions( + self, + ) -> Callable[ + [permission_service.ListPermissionsRequest], + Awaitable[permission_service.ListPermissionsResponse], + ]: + r"""Return a callable for the list permissions method over gRPC. + + Lists permissions for the specific resource. + + Returns: + Callable[[~.ListPermissionsRequest], + Awaitable[~.ListPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_permissions" not in self._stubs: + self._stubs["list_permissions"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/ListPermissions", + request_serializer=permission_service.ListPermissionsRequest.serialize, + response_deserializer=permission_service.ListPermissionsResponse.deserialize, + ) + return self._stubs["list_permissions"] + + @property + def update_permission( + self, + ) -> Callable[ + [permission_service.UpdatePermissionRequest], + Awaitable[gag_permission.Permission], + ]: + r"""Return a callable for the update permission method over gRPC. + + Updates the permission. + + Returns: + Callable[[~.UpdatePermissionRequest], + Awaitable[~.Permission]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_permission" not in self._stubs: + self._stubs["update_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/UpdatePermission", + request_serializer=permission_service.UpdatePermissionRequest.serialize, + response_deserializer=gag_permission.Permission.deserialize, + ) + return self._stubs["update_permission"] + + @property + def delete_permission( + self, + ) -> Callable[ + [permission_service.DeletePermissionRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete permission method over gRPC. + + Deletes the permission. + + Returns: + Callable[[~.DeletePermissionRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_permission" not in self._stubs: + self._stubs["delete_permission"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/DeletePermission", + request_serializer=permission_service.DeletePermissionRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_permission"] + + @property + def transfer_ownership( + self, + ) -> Callable[ + [permission_service.TransferOwnershipRequest], + Awaitable[permission_service.TransferOwnershipResponse], + ]: + r"""Return a callable for the transfer ownership method over gRPC. + + Transfers ownership of the tuned model. + This is the only way to change ownership of the tuned + model. The current owner will be downgraded to writer + role. + + Returns: + Callable[[~.TransferOwnershipRequest], + Awaitable[~.TransferOwnershipResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "transfer_ownership" not in self._stubs: + self._stubs["transfer_ownership"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.PermissionService/TransferOwnership", + request_serializer=permission_service.TransferOwnershipRequest.serialize, + response_deserializer=permission_service.TransferOwnershipResponse.deserialize, + ) + return self._stubs["transfer_ownership"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("PermissionServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/rest.py new file mode 100644 index 000000000000..b449cba24111 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/permission_service/transports/rest.py @@ -0,0 +1,1050 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import permission as gag_permission +from google.ai.generativelanguage_v1beta.types import permission +from google.ai.generativelanguage_v1beta.types import permission_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import PermissionServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class PermissionServiceRestInterceptor: + """Interceptor for PermissionService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the PermissionServiceRestTransport. + + .. 
code-block:: python + class MyCustomPermissionServiceInterceptor(PermissionServiceRestInterceptor): + def pre_create_permission(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_permission(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_permission(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_permission(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_permission(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_permissions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_permissions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_transfer_ownership(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_transfer_ownership(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_permission(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_permission(self, response): + logging.log(f"Received response: {response}") + return response + + transport = PermissionServiceRestTransport(interceptor=MyCustomPermissionServiceInterceptor()) + client = PermissionServiceClient(transport=transport) + + + """ + + def pre_create_permission( + self, + request: permission_service.CreatePermissionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[permission_service.CreatePermissionRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_permission + + Override in a subclass to manipulate the request or metadata + before they are sent to the PermissionService server. + """ + return request, metadata + + def post_create_permission( + self, response: gag_permission.Permission + ) -> gag_permission.Permission: + """Post-rpc interceptor for create_permission + + Override in a subclass to manipulate the response + after it is returned by the PermissionService server but before + it is returned to user code. + """ + return response + + def pre_delete_permission( + self, + request: permission_service.DeletePermissionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[permission_service.DeletePermissionRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_permission + + Override in a subclass to manipulate the request or metadata + before they are sent to the PermissionService server. + """ + return request, metadata + + def pre_get_permission( + self, + request: permission_service.GetPermissionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[permission_service.GetPermissionRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_permission + + Override in a subclass to manipulate the request or metadata + before they are sent to the PermissionService server. + """ + return request, metadata + + def post_get_permission( + self, response: permission.Permission + ) -> permission.Permission: + """Post-rpc interceptor for get_permission + + Override in a subclass to manipulate the response + after it is returned by the PermissionService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_permissions( + self, + request: permission_service.ListPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[permission_service.ListPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the PermissionService server. + """ + return request, metadata + + def post_list_permissions( + self, response: permission_service.ListPermissionsResponse + ) -> permission_service.ListPermissionsResponse: + """Post-rpc interceptor for list_permissions + + Override in a subclass to manipulate the response + after it is returned by the PermissionService server but before + it is returned to user code. + """ + return response + + def pre_transfer_ownership( + self, + request: permission_service.TransferOwnershipRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[permission_service.TransferOwnershipRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for transfer_ownership + + Override in a subclass to manipulate the request or metadata + before they are sent to the PermissionService server. + """ + return request, metadata + + def post_transfer_ownership( + self, response: permission_service.TransferOwnershipResponse + ) -> permission_service.TransferOwnershipResponse: + """Post-rpc interceptor for transfer_ownership + + Override in a subclass to manipulate the response + after it is returned by the PermissionService server but before + it is returned to user code. + """ + return response + + def pre_update_permission( + self, + request: permission_service.UpdatePermissionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[permission_service.UpdatePermissionRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_permission + + Override in a subclass to manipulate the request or metadata + before they are sent to the PermissionService server. + """ + return request, metadata + + def post_update_permission( + self, response: gag_permission.Permission + ) -> gag_permission.Permission: + """Post-rpc interceptor for update_permission + + Override in a subclass to manipulate the response + after it is returned by the PermissionService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class PermissionServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: PermissionServiceRestInterceptor + + +class PermissionServiceRestTransport(PermissionServiceTransport): + """REST backend transport for PermissionService. + + Provides methods for managing permissions to PaLM API + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[PermissionServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
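+        # Illustrative note (not part of the generated code): the host handling
+        # below accepts either a bare hostname or a full URL. With the defaults
+        # declared above (the local endpoint in the second example is hypothetical):
+        #
+        #     PermissionServiceRestTransport(host="generativelanguage.googleapis.com")
+        #     # -> requests are sent to "https://generativelanguage.googleapis.com"
+        #
+        #     PermissionServiceRestTransport(host="localhost:7469", url_scheme="http")
+        #     # -> requests are sent to "http://localhost:7469"
+        #
+        #     PermissionServiceRestTransport(host="https://generativelanguage.googleapis.com")
+        #     # -> an explicit scheme in ``host`` is kept as-is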
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        #       credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or PermissionServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _CreatePermission(PermissionServiceRestStub):
+        def __hash__(self):
+            return hash("CreatePermission")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: permission_service.CreatePermissionRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> gag_permission.Permission:
+            r"""Call the create permission method over HTTP.
+
+            Args:
+                request (~.permission_service.CreatePermissionRequest):
+                    The request object. Request to create a ``Permission``.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.gag_permission.Permission:
+                    Permission resource grants user,
+                group or the rest of the world access to
+                the PaLM API resource (e.g. a tuned
+                model, corpus).
+
+                A role is a collection of permitted
+                operations that allows users to perform
+                specific actions on PaLM API resources.
+                To make them available to users, groups,
+                or service accounts, you assign roles.
+                When you assign a role, you grant
+                permissions that the role contains.
+
+                There are three concentric roles. Each
+                role is a superset of the previous
+                role's permitted operations:
+
+                - reader can use the resource (e.g.
+ tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=tunedModels/*}/permissions", + "body": "permission", + }, + { + "method": "post", + "uri": "/v1beta/{parent=corpora/*}/permissions", + "body": "permission", + }, + ] + request, metadata = self._interceptor.pre_create_permission( + request, metadata + ) + pb_request = permission_service.CreatePermissionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gag_permission.Permission() + pb_resp = gag_permission.Permission.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_permission(resp) + return resp + + class _DeletePermission(PermissionServiceRestStub): + def __hash__(self): + return hash("DeletePermission") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: permission_service.DeletePermissionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete permission method over HTTP. + + Args: + request (~.permission_service.DeletePermissionRequest): + The request object. Request to delete the ``Permission``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta/{name=tunedModels/*/permissions/*}", + }, + { + "method": "delete", + "uri": "/v1beta/{name=corpora/*/permissions/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_permission( + request, metadata + ) + pb_request = permission_service.DeletePermissionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetPermission(PermissionServiceRestStub): + def __hash__(self): + return hash("GetPermission") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: permission_service.GetPermissionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission.Permission: + r"""Call the get permission method over HTTP. + + Args: + request (~.permission_service.GetPermissionRequest): + The request object. Request for getting information about a specific + ``Permission``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.permission.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. 
+ tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=tunedModels/*/permissions/*}", + }, + { + "method": "get", + "uri": "/v1beta/{name=corpora/*/permissions/*}", + }, + ] + request, metadata = self._interceptor.pre_get_permission(request, metadata) + pb_request = permission_service.GetPermissionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = permission.Permission() + pb_resp = permission.Permission.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_permission(resp) + return resp + + class _ListPermissions(PermissionServiceRestStub): + def __hash__(self): + return hash("ListPermissions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: permission_service.ListPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission_service.ListPermissionsResponse: + r"""Call the list permissions method over HTTP. + + Args: + request (~.permission_service.ListPermissionsRequest): + The request object. Request for listing permissions. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.permission_service.ListPermissionsResponse: + Response from ``ListPermissions`` containing a paginated + list of permissions. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{parent=tunedModels/*}/permissions", + }, + { + "method": "get", + "uri": "/v1beta/{parent=corpora/*}/permissions", + }, + ] + request, metadata = self._interceptor.pre_list_permissions( + request, metadata + ) + pb_request = permission_service.ListPermissionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = permission_service.ListPermissionsResponse() + pb_resp = permission_service.ListPermissionsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_permissions(resp) + return resp + + class _TransferOwnership(PermissionServiceRestStub): + def __hash__(self): + return hash("TransferOwnership") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: permission_service.TransferOwnershipRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> permission_service.TransferOwnershipResponse: + r"""Call the transfer ownership method over HTTP. + + Args: + request (~.permission_service.TransferOwnershipRequest): + The request object. Request to transfer the ownership of + the tuned model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.permission_service.TransferOwnershipResponse: + Response from ``TransferOwnership``. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{name=tunedModels/*}:transferOwnership", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_transfer_ownership( + request, metadata + ) + pb_request = permission_service.TransferOwnershipRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = permission_service.TransferOwnershipResponse() + pb_resp = permission_service.TransferOwnershipResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_transfer_ownership(resp) + return resp + + class _UpdatePermission(PermissionServiceRestStub): + def __hash__(self): + return hash("UpdatePermission") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: permission_service.UpdatePermissionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gag_permission.Permission: + r"""Call the update permission method over HTTP. + + Args: + request (~.permission_service.UpdatePermissionRequest): + The request object. Request to update the ``Permission``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gag_permission.Permission: + Permission resource grants user, + group or the rest of the world access to + the PaLM API resource (e.g. a tuned + model, corpus). + + A role is a collection of permitted + operations that allows users to perform + specific actions on PaLM API resources. + To make them available to users, groups, + or service accounts, you assign roles. + When you assign a role, you grant + permissions that the role contains. + + There are three concentric roles. Each + role is a superset of the previous + role's permitted operations: + + - reader can use the resource (e.g. 
+ tuned model, corpus) for inference + - writer has reader's permissions and + additionally can edit and share + - owner has writer's permissions and + additionally can delete + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta/{permission.name=tunedModels/*/permissions/*}", + "body": "permission", + }, + { + "method": "patch", + "uri": "/v1beta/{permission.name=corpora/*/permissions/*}", + "body": "permission", + }, + ] + request, metadata = self._interceptor.pre_update_permission( + request, metadata + ) + pb_request = permission_service.UpdatePermissionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gag_permission.Permission() + pb_resp = gag_permission.Permission.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_permission(resp) + return resp + + @property + def create_permission( + self, + ) -> Callable[ + [permission_service.CreatePermissionRequest], gag_permission.Permission + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreatePermission(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_permission( + self, + ) -> Callable[[permission_service.DeletePermissionRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeletePermission(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_permission( + self, + ) -> Callable[[permission_service.GetPermissionRequest], permission.Permission]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetPermission(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_permissions( + self, + ) -> Callable[ + [permission_service.ListPermissionsRequest], + permission_service.ListPermissionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListPermissions(self._session, self._host, self._interceptor) # type: ignore + + @property + def transfer_ownership( + self, + ) -> Callable[ + [permission_service.TransferOwnershipRequest], + permission_service.TransferOwnershipResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._TransferOwnership(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_permission( + self, + ) -> Callable[ + [permission_service.UpdatePermissionRequest], gag_permission.Permission + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdatePermission(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("PermissionServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/__init__.py new file mode 100644 index 000000000000..57b35858559c --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .async_client import RetrieverServiceAsyncClient +from .client import RetrieverServiceClient + +__all__ = ( + "RetrieverServiceClient", + "RetrieverServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/async_client.py new file mode 100644 index 000000000000..74b278b12262 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/async_client.py @@ -0,0 +1,2333 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
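+#
+# Illustrative usage sketch (not part of the generated module; the request values
+# are hypothetical and mirror the samples embedded in the method docstrings below):
+#
+#     from google.ai import generativelanguage_v1beta
+#
+#     async def sample_usage():
+#         client = generativelanguage_v1beta.RetrieverServiceAsyncClient()
+#         corpus = await client.create_corpus(
+#             request=generativelanguage_v1beta.CreateCorpusRequest()
+#         )
+#         print(corpus.name)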
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.services.retriever_service import pagers +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + +from .client import RetrieverServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, RetrieverServiceTransport +from .transports.grpc_asyncio import RetrieverServiceGrpcAsyncIOTransport + + +class RetrieverServiceAsyncClient: + """An API for semantic search over a corpus of user uploaded + content. + """ + + _client: RetrieverServiceClient + + DEFAULT_ENDPOINT = RetrieverServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = RetrieverServiceClient.DEFAULT_MTLS_ENDPOINT + + chunk_path = staticmethod(RetrieverServiceClient.chunk_path) + parse_chunk_path = staticmethod(RetrieverServiceClient.parse_chunk_path) + corpus_path = staticmethod(RetrieverServiceClient.corpus_path) + parse_corpus_path = staticmethod(RetrieverServiceClient.parse_corpus_path) + document_path = staticmethod(RetrieverServiceClient.document_path) + parse_document_path = staticmethod(RetrieverServiceClient.parse_document_path) + common_billing_account_path = staticmethod( + RetrieverServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + RetrieverServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(RetrieverServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + RetrieverServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + RetrieverServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + RetrieverServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(RetrieverServiceClient.common_project_path) + parse_common_project_path = staticmethod( + RetrieverServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(RetrieverServiceClient.common_location_path) + parse_common_location_path = staticmethod( + RetrieverServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RetrieverServiceAsyncClient: The constructed client. 
+ """ + return RetrieverServiceClient.from_service_account_info.__func__(RetrieverServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RetrieverServiceAsyncClient: The constructed client. + """ + return RetrieverServiceClient.from_service_account_file.__func__(RetrieverServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return RetrieverServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> RetrieverServiceTransport: + """Returns the transport used by the client instance. + + Returns: + RetrieverServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(RetrieverServiceClient).get_transport_class, type(RetrieverServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RetrieverServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the retriever service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.RetrieverServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. 
It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = RetrieverServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_corpus( + self, + request: Optional[Union[retriever_service.CreateCorpusRequest, dict]] = None, + *, + corpus: Optional[retriever.Corpus] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Creates an empty ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_create_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateCorpusRequest( + ) + + # Make the request + response = await client.create_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CreateCorpusRequest, dict]]): + The request object. Request to create a ``Corpus``. + corpus (:class:`google.ai.generativelanguage_v1beta.types.Corpus`): + Required. The ``Corpus`` to create. + This corresponds to the ``corpus`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Corpus: + A Corpus is a collection of Documents. + A project can create up to 5 corpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
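+        # Illustrative note (not part of the generated code): callers pass either a
+        # fully-formed request object or the flattened ``corpus`` argument, never
+        # both; mixing them raises the ValueError below. For example (hypothetical
+        # display name):
+        #
+        #     await client.create_corpus(corpus=retriever.Corpus(display_name="my docs"))
+        #     # or, equivalently:
+        #     await client.create_corpus(
+        #         request=retriever_service.CreateCorpusRequest(
+        #             corpus=retriever.Corpus(display_name="my docs"),
+        #         )
+        #     )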
+ has_flattened_params = any([corpus]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.CreateCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if corpus is not None: + request.corpus = corpus + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_corpus, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_corpus( + self, + request: Optional[Union[retriever_service.GetCorpusRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Gets information about a specific ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_get_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetCorpusRequest( + name="name_value", + ) + + # Make the request + response = await client.get_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GetCorpusRequest, dict]]): + The request object. Request for getting information about a specific + ``Corpus``. + name (:class:`str`): + Required. The name of the ``Corpus``. Example: + ``corpora/my-corpus-123`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Corpus: + A Corpus is a collection of Documents. + A project can create up to 5 corpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = retriever_service.GetCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_corpus, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_corpus( + self, + request: Optional[Union[retriever_service.UpdateCorpusRequest, dict]] = None, + *, + corpus: Optional[retriever.Corpus] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Updates a ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_update_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateCorpusRequest( + ) + + # Make the request + response = await client.update_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.UpdateCorpusRequest, dict]]): + The request object. Request to update a ``Corpus``. + corpus (:class:`google.ai.generativelanguage_v1beta.types.Corpus`): + Required. The ``Corpus`` to update. + This corresponds to the ``corpus`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to update. Currently, this + only supports updating ``display_name``. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Corpus: + A Corpus is a collection of Documents. + A project can create up to 5 corpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
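+        # Illustrative note (not part of the generated code): ``update_mask`` is a
+        # standard ``google.protobuf.FieldMask``; per the docstring above, only
+        # ``display_name`` can currently be updated. A hypothetical call:
+        #
+        #     await client.update_corpus(
+        #         corpus=retriever.Corpus(name="corpora/my-corpus-123", display_name="Renamed"),
+        #         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+        #     )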
+ has_flattened_params = any([corpus, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.UpdateCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if corpus is not None: + request.corpus = corpus + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_corpus, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("corpus.name", request.corpus.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_corpus( + self, + request: Optional[Union[retriever_service.DeleteCorpusRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_delete_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteCorpusRequest( + name="name_value", + ) + + # Make the request + await client.delete_corpus(request=request) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.DeleteCorpusRequest, dict]]): + The request object. Request to delete a ``Corpus``. + name (:class:`str`): + Required. The resource name of the ``Corpus``. Example: + ``corpora/my-corpus-123`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = retriever_service.DeleteCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_corpus, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_corpora( + self, + request: Optional[Union[retriever_service.ListCorporaRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCorporaAsyncPager: + r"""Lists all ``Corpora`` owned by the user. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_list_corpora(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListCorporaRequest( + ) + + # Make the request + page_result = client.list_corpora(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.ListCorporaRequest, dict]]): + The request object. Request for listing ``Corpora``. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListCorporaAsyncPager: + Response from ListCorpora containing a paginated list of Corpora. + The results are sorted by ascending + corpus.create_time. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + request = retriever_service.ListCorporaRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_corpora, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. 
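+        # Illustrative note (not part of the generated code): the call below fetches a
+        # single page; the ListCorporaAsyncPager constructed afterwards re-invokes the
+        # wrapped RPC with ``page_token`` set, so callers can simply iterate:
+        #
+        #     pager = await client.list_corpora()
+        #     async for corpus in pager:
+        #         print(corpus.name)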
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListCorporaAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_corpus( + self, + request: Optional[Union[retriever_service.QueryCorpusRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.QueryCorpusResponse: + r"""Performs semantic search over a ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_query_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryCorpusRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = await client.query_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.QueryCorpusRequest, dict]]): + The request object. Request for querying a ``Corpus``. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.QueryCorpusResponse: + Response from QueryCorpus containing a list of relevant + chunks. + + """ + # Create or coerce a protobuf request object. + request = retriever_service.QueryCorpusRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_corpus, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_document( + self, + request: Optional[Union[retriever_service.CreateDocumentRequest, dict]] = None, + *, + parent: Optional[str] = None, + document: Optional[retriever.Document] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Creates an empty ``Document``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_create_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateDocumentRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_document(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CreateDocumentRequest, dict]]): + The request object. Request to create a ``Document``. + parent (:class:`str`): + Required. The name of the ``Corpus`` where this + ``Document`` will be created. Example: + ``corpora/my-corpus-123`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + document (:class:`google.ai.generativelanguage_v1beta.types.Document`): + Required. The ``Document`` to create. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Document: + A Document is a collection of Chunks. + A Corpus can have a maximum of 10,000 Documents. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.CreateDocumentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_document, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_document( + self, + request: Optional[Union[retriever_service.GetDocumentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Gets information about a specific ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_get_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetDocumentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_document(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GetDocumentRequest, dict]]): + The request object. Request for getting information about a specific + ``Document``. + name (:class:`str`): + Required. The name of the ``Document`` to retrieve. + Example: ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Document: + A Document is a collection of Chunks. + A Corpus can have a maximum of 10,000 Documents. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.GetDocumentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_document, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_document( + self, + request: Optional[Union[retriever_service.UpdateDocumentRequest, dict]] = None, + *, + document: Optional[retriever.Document] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Updates a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_update_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateDocumentRequest( + ) + + # Make the request + response = await client.update_document(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.UpdateDocumentRequest, dict]]): + The request object. Request to update a ``Document``. + document (:class:`google.ai.generativelanguage_v1beta.types.Document`): + Required. The ``Document`` to update. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to update. Currently, this + only supports updating ``display_name`` and + ``custom_metadata``. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Document: + A Document is a collection of Chunks. + A Corpus can have a maximum of 10,000 Documents. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.UpdateDocumentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if document is not None: + request.document = document + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_document, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("document.name", request.document.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_document( + self, + request: Optional[Union[retriever_service.DeleteDocumentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_delete_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteDocumentRequest( + name="name_value", + ) + + # Make the request + await client.delete_document(request=request) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.DeleteDocumentRequest, dict]]): + The request object. Request to delete a ``Document``. + name (:class:`str`): + Required. The resource name of the ``Document`` to + delete. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.DeleteDocumentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_document, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_documents( + self, + request: Optional[Union[retriever_service.ListDocumentsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDocumentsAsyncPager: + r"""Lists all ``Document``\ s in a ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_list_documents(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListDocumentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_documents(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.ListDocumentsRequest, dict]]): + The request object. Request for listing ``Document``\ s. + parent (:class:`str`): + Required. The name of the ``Corpus`` containing + ``Document``\ s. Example: ``corpora/my-corpus-123`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListDocumentsAsyncPager: + Response from ListDocuments containing a paginated list of Documents. + The Documents are sorted by ascending + document.create_time. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.ListDocumentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_documents, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDocumentsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_document( + self, + request: Optional[Union[retriever_service.QueryDocumentRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.QueryDocumentResponse: + r"""Performs semantic search over a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_query_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryDocumentRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = await client.query_document(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.QueryDocumentRequest, dict]]): + The request object. Request for querying a ``Document``. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.QueryDocumentResponse: + Response from QueryDocument containing a list of + relevant chunks. + + """ + # Create or coerce a protobuf request object. + request = retriever_service.QueryDocumentRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_document, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_chunk( + self, + request: Optional[Union[retriever_service.CreateChunkRequest, dict]] = None, + *, + parent: Optional[str] = None, + chunk: Optional[retriever.Chunk] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Creates a ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_create_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.CreateChunkRequest( + parent="parent_value", + chunk=chunk, + ) + + # Make the request + response = await client.create_chunk(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CreateChunkRequest, dict]]): + The request object. Request to create a ``Chunk``. + parent (:class:`str`): + Required. The name of the ``Document`` where this + ``Chunk`` will be created. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + chunk (:class:`google.ai.generativelanguage_v1beta.types.Chunk`): + Required. The ``Chunk`` to create. + This corresponds to the ``chunk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Chunk: + A Chunk is a subpart of a Document that is treated as an independent unit + for the purposes of vector representation and + storage. A Corpus can have a maximum of 1 million + Chunks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, chunk]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.CreateChunkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if chunk is not None: + request.chunk = chunk + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_chunk, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_create_chunks( + self, + request: Optional[ + Union[retriever_service.BatchCreateChunksRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.BatchCreateChunksResponse: + r"""Batch create ``Chunk``\ s. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_batch_create_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.CreateChunkRequest() + requests.parent = "parent_value" + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchCreateChunksRequest( + requests=requests, + ) + + # Make the request + response = await client.batch_create_chunks(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.BatchCreateChunksRequest, dict]]): + The request object. Request to batch create ``Chunk``\ s. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchCreateChunksResponse: + Response from BatchCreateChunks containing a list of + created Chunks. + + """ + # Create or coerce a protobuf request object. + request = retriever_service.BatchCreateChunksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_chunks, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_chunk( + self, + request: Optional[Union[retriever_service.GetChunkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Gets information about a specific ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_get_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetChunkRequest( + name="name_value", + ) + + # Make the request + response = await client.get_chunk(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GetChunkRequest, dict]]): + The request object. Request for getting information about a specific + ``Chunk``. + name (:class:`str`): + Required. The name of the ``Chunk`` to retrieve. + Example: + ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Chunk: + A Chunk is a subpart of a Document that is treated as an independent unit + for the purposes of vector representation and + storage. A Corpus can have a maximum of 1 million + Chunks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.GetChunkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_chunk, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_chunk( + self, + request: Optional[Union[retriever_service.UpdateChunkRequest, dict]] = None, + *, + chunk: Optional[retriever.Chunk] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Updates a ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_update_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.UpdateChunkRequest( + chunk=chunk, + ) + + # Make the request + response = await client.update_chunk(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.UpdateChunkRequest, dict]]): + The request object. Request to update a ``Chunk``. + chunk (:class:`google.ai.generativelanguage_v1beta.types.Chunk`): + Required. The ``Chunk`` to update. + This corresponds to the ``chunk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to update. Currently, this + only supports updating ``custom_metadata`` and ``data``. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Chunk: + A Chunk is a subpart of a Document that is treated as an independent unit + for the purposes of vector representation and + storage. A Corpus can have a maximum of 1 million + Chunks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([chunk, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.UpdateChunkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if chunk is not None: + request.chunk = chunk + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_chunk, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("chunk.name", request.chunk.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_update_chunks( + self, + request: Optional[ + Union[retriever_service.BatchUpdateChunksRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.BatchUpdateChunksResponse: + r"""Batch update ``Chunk``\ s. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_batch_update_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.UpdateChunkRequest() + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchUpdateChunksRequest( + requests=requests, + ) + + # Make the request + response = await client.batch_update_chunks(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.BatchUpdateChunksRequest, dict]]): + The request object. Request to batch update ``Chunk``\ s. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchUpdateChunksResponse: + Response from BatchUpdateChunks containing a list of + updated Chunks. + + """ + # Create or coerce a protobuf request object. + request = retriever_service.BatchUpdateChunksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_update_chunks, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_chunk( + self, + request: Optional[Union[retriever_service.DeleteChunkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_delete_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteChunkRequest( + name="name_value", + ) + + # Make the request + await client.delete_chunk(request=request) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.DeleteChunkRequest, dict]]): + The request object. Request to delete a ``Chunk``. + name (:class:`str`): + Required. The resource name of the ``Chunk`` to delete. + Example: + ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.DeleteChunkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_chunk, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def batch_delete_chunks( + self, + request: Optional[ + Union[retriever_service.BatchDeleteChunksRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Batch delete ``Chunk``\ s. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_batch_delete_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.DeleteChunkRequest() + requests.name = "name_value" + + request = generativelanguage_v1beta.BatchDeleteChunksRequest( + requests=requests, + ) + + # Make the request + await client.batch_delete_chunks(request=request) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.BatchDeleteChunksRequest, dict]]): + The request object. Request to batch delete ``Chunk``\ s. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + request = retriever_service.BatchDeleteChunksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_delete_chunks, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_chunks( + self, + request: Optional[Union[retriever_service.ListChunksRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListChunksAsyncPager: + r"""Lists all ``Chunk``\ s in a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_list_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListChunksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_chunks(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.ListChunksRequest, dict]]): + The request object. Request for listing ``Chunk``\ s. + parent (:class:`str`): + Required. The name of the ``Document`` containing + ``Chunk``\ s. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListChunksAsyncPager: + Response from ListChunks containing a paginated list of Chunks. + The Chunks are sorted by ascending chunk.create_time. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = retriever_service.ListChunksRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_chunks, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListChunksAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "RetrieverServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("RetrieverServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/client.py new file mode 100644 index 000000000000..96ff948438c0 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/client.py @@ -0,0 +1,2459 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.services.retriever_service import pagers +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + +from .transports.base import DEFAULT_CLIENT_INFO, RetrieverServiceTransport +from .transports.grpc import RetrieverServiceGrpcTransport +from .transports.grpc_asyncio import RetrieverServiceGrpcAsyncIOTransport +from .transports.rest import RetrieverServiceRestTransport + + +class RetrieverServiceClientMeta(type): + """Metaclass for the RetrieverService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[RetrieverServiceTransport]] + _transport_registry["grpc"] = RetrieverServiceGrpcTransport + _transport_registry["grpc_asyncio"] = RetrieverServiceGrpcAsyncIOTransport + _transport_registry["rest"] = RetrieverServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[RetrieverServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class RetrieverServiceClient(metaclass=RetrieverServiceClientMeta): + """An API for semantic search over a corpus of user uploaded + content. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "generativelanguage.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RetrieverServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RetrieverServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RetrieverServiceTransport: + """Returns the transport used by the client instance. + + Returns: + RetrieverServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def chunk_path( + corpus: str, + document: str, + chunk: str, + ) -> str: + """Returns a fully-qualified chunk string.""" + return "corpora/{corpus}/documents/{document}/chunks/{chunk}".format( + corpus=corpus, + document=document, + chunk=chunk, + ) + + @staticmethod + def parse_chunk_path(path: str) -> Dict[str, str]: + """Parses a chunk path into its component segments.""" + m = re.match( + r"^corpora/(?P.+?)/documents/(?P.+?)/chunks/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def corpus_path( + corpus: str, + ) -> str: + """Returns a fully-qualified corpus string.""" + return "corpora/{corpus}".format( + corpus=corpus, + ) + + @staticmethod + def parse_corpus_path(path: str) -> Dict[str, str]: + """Parses a corpus path into its component segments.""" + m = re.match(r"^corpora/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def document_path( + corpus: str, + document: str, + ) -> str: + """Returns a fully-qualified document string.""" + return "corpora/{corpus}/documents/{document}".format( + corpus=corpus, + document=document, + ) + + @staticmethod + def parse_document_path(path: str) -> Dict[str, str]: + """Parses a document path into its component segments.""" + m = re.match(r"^corpora/(?P.+?)/documents/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, RetrieverServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the retriever service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, RetrieverServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RetrieverServiceTransport): + # transport is a RetrieverServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_corpus( + self, + request: Optional[Union[retriever_service.CreateCorpusRequest, dict]] = None, + *, + corpus: Optional[retriever.Corpus] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Creates an empty ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_create_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateCorpusRequest( + ) + + # Make the request + response = client.create_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CreateCorpusRequest, dict]): + The request object. Request to create a ``Corpus``. + corpus (google.ai.generativelanguage_v1beta.types.Corpus): + Required. The ``Corpus`` to create. + This corresponds to the ``corpus`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Corpus: + A Corpus is a collection of Documents. + A project can create up to 5 corpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([corpus]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.CreateCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.CreateCorpusRequest): + request = retriever_service.CreateCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
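+ # Illustrative note (a sketch, not part of the generated sample): the + # flattened `corpus` argument is equivalent to passing a full request object, + # e.g. client.create_corpus(corpus=retriever.Corpus(display_name="My corpus")) + # versus client.create_corpus(request=retriever_service.CreateCorpusRequest( + # corpus=retriever.Corpus(display_name="My corpus"))); the display name here + # is a placeholder value.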
+ if corpus is not None: + request.corpus = corpus + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_corpus] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_corpus( + self, + request: Optional[Union[retriever_service.GetCorpusRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Gets information about a specific ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_get_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetCorpusRequest( + name="name_value", + ) + + # Make the request + response = client.get_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GetCorpusRequest, dict]): + The request object. Request for getting information about a specific + ``Corpus``. + name (str): + Required. The name of the ``Corpus``. Example: + ``corpora/my-corpus-123`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Corpus: + A Corpus is a collection of Documents. + A project can create up to 5 corpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.GetCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.GetCorpusRequest): + request = retriever_service.GetCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. 
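+ # Illustrative note: the routing header below is attached as gRPC metadata; + # for name="corpora/my-corpus-123" it adds roughly the pair + # ("x-goog-request-params", "name=corpora%2Fmy-corpus-123") so the backend can + # route the call (exact encoding is handled by gapic_v1.routing_header; the + # resource name is a placeholder).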
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_corpus( + self, + request: Optional[Union[retriever_service.UpdateCorpusRequest, dict]] = None, + *, + corpus: Optional[retriever.Corpus] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Updates a ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_update_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateCorpusRequest( + ) + + # Make the request + response = client.update_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.UpdateCorpusRequest, dict]): + The request object. Request to update a ``Corpus``. + corpus (google.ai.generativelanguage_v1beta.types.Corpus): + Required. The ``Corpus`` to update. + This corresponds to the ``corpus`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Currently, this + only supports updating ``display_name``. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Corpus: + A Corpus is a collection of Documents. + A project can create up to 5 corpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([corpus, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.UpdateCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.UpdateCorpusRequest): + request = retriever_service.UpdateCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
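+ # Illustrative note: a caller updating only the display name would pass + # something like update_mask=field_mask_pb2.FieldMask(paths=["display_name"]) + # together with a Corpus whose `name` field identifies the resource to update + # (placeholder values).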
+ if corpus is not None: + request.corpus = corpus + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("corpus.name", request.corpus.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_corpus( + self, + request: Optional[Union[retriever_service.DeleteCorpusRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a ``Corpus``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_delete_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteCorpusRequest( + name="name_value", + ) + + # Make the request + client.delete_corpus(request=request) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.DeleteCorpusRequest, dict]): + The request object. Request to delete a ``Corpus``. + name (str): + Required. The resource name of the ``Corpus``. Example: + ``corpora/my-corpus-123`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.DeleteCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.DeleteCorpusRequest): + request = retriever_service.DeleteCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_corpora( + self, + request: Optional[Union[retriever_service.ListCorporaRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCorporaPager: + r"""Lists all ``Corpora`` owned by the user. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_list_corpora(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListCorporaRequest( + ) + + # Make the request + page_result = client.list_corpora(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.ListCorporaRequest, dict]): + The request object. Request for listing ``Corpora``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListCorporaPager: + Response from ListCorpora containing a paginated list of Corpora. + The results are sorted by ascending + corpus.create_time. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.ListCorporaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.ListCorporaRequest): + request = retriever_service.ListCorporaRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_corpora] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListCorporaPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_corpus( + self, + request: Optional[Union[retriever_service.QueryCorpusRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.QueryCorpusResponse: + r"""Performs semantic search over a ``Corpus``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_query_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryCorpusRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = client.query_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.QueryCorpusRequest, dict]): + The request object. Request for querying a ``Corpus``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.QueryCorpusResponse: + Response from QueryCorpus containing a list of relevant + chunks. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.QueryCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.QueryCorpusRequest): + request = retriever_service.QueryCorpusRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_document( + self, + request: Optional[Union[retriever_service.CreateDocumentRequest, dict]] = None, + *, + parent: Optional[str] = None, + document: Optional[retriever.Document] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Creates an empty ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_create_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateDocumentRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_document(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CreateDocumentRequest, dict]): + The request object. Request to create a ``Document``. + parent (str): + Required. The name of the ``Corpus`` where this + ``Document`` will be created. Example: + ``corpora/my-corpus-123`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + document (google.ai.generativelanguage_v1beta.types.Document): + Required. The ``Document`` to create. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Document: + A Document is a collection of Chunks. + A Corpus can have a maximum of 10,000 Documents. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.CreateDocumentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.CreateDocumentRequest): + request = retriever_service.CreateDocumentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_document] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_document( + self, + request: Optional[Union[retriever_service.GetDocumentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Gets information about a specific ``Document``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_get_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetDocumentRequest( + name="name_value", + ) + + # Make the request + response = client.get_document(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GetDocumentRequest, dict]): + The request object. Request for getting information about a specific + ``Document``. + name (str): + Required. The name of the ``Document`` to retrieve. + Example: ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Document: + A Document is a collection of Chunks. + A Corpus can have a maximum of 10,000 Documents. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.GetDocumentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.GetDocumentRequest): + request = retriever_service.GetDocumentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_document] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_document( + self, + request: Optional[Union[retriever_service.UpdateDocumentRequest, dict]] = None, + *, + document: Optional[retriever.Document] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Updates a ``Document``. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_update_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateDocumentRequest( + ) + + # Make the request + response = client.update_document(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.UpdateDocumentRequest, dict]): + The request object. Request to update a ``Document``. + document (google.ai.generativelanguage_v1beta.types.Document): + Required. The ``Document`` to update. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Currently, this + only supports updating ``display_name`` and + ``custom_metadata``. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Document: + A Document is a collection of Chunks. + A Corpus can have a maximum of 10,000 Documents. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.UpdateDocumentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.UpdateDocumentRequest): + request = retriever_service.UpdateDocumentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if document is not None: + request.document = document + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_document] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("document.name", request.document.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_document( + self, + request: Optional[Union[retriever_service.DeleteDocumentRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_delete_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteDocumentRequest( + name="name_value", + ) + + # Make the request + client.delete_document(request=request) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.DeleteDocumentRequest, dict]): + The request object. Request to delete a ``Document``. + name (str): + Required. The resource name of the ``Document`` to + delete. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.DeleteDocumentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.DeleteDocumentRequest): + request = retriever_service.DeleteDocumentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_document] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_documents( + self, + request: Optional[Union[retriever_service.ListDocumentsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDocumentsPager: + r"""Lists all ``Document``\ s in a ``Corpus``. 
+ + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_list_documents(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListDocumentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_documents(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.ListDocumentsRequest, dict]): + The request object. Request for listing ``Document``\ s. + parent (str): + Required. The name of the ``Corpus`` containing + ``Document``\ s. Example: ``corpora/my-corpus-123`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListDocumentsPager: + Response from ListDocuments containing a paginated list of Documents. + The Documents are sorted by ascending + document.create_time. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.ListDocumentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.ListDocumentsRequest): + request = retriever_service.ListDocumentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_documents] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDocumentsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
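+ # Illustrative note: the returned pager fetches further pages lazily, e.g. + # for document in client.list_documents(parent="corpora/my-corpus-123"): + #     print(document.name) + # issues additional ListDocuments calls as needed while iterating (the parent + # value is a placeholder).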
+ return response + + def query_document( + self, + request: Optional[Union[retriever_service.QueryDocumentRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.QueryDocumentResponse: + r"""Performs semantic search over a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_query_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryDocumentRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = client.query_document(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.QueryDocumentRequest, dict]): + The request object. Request for querying a ``Document``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.QueryDocumentResponse: + Response from QueryDocument containing a list of + relevant chunks. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.QueryDocumentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.QueryDocumentRequest): + request = retriever_service.QueryDocumentRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_document] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_chunk( + self, + request: Optional[Union[retriever_service.CreateChunkRequest, dict]] = None, + *, + parent: Optional[str] = None, + chunk: Optional[retriever.Chunk] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Creates a ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_create_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.CreateChunkRequest( + parent="parent_value", + chunk=chunk, + ) + + # Make the request + response = client.create_chunk(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CreateChunkRequest, dict]): + The request object. Request to create a ``Chunk``. + parent (str): + Required. The name of the ``Document`` where this + ``Chunk`` will be created. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + chunk (google.ai.generativelanguage_v1beta.types.Chunk): + Required. The ``Chunk`` to create. + This corresponds to the ``chunk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Chunk: + A Chunk is a subpart of a Document that is treated as an independent unit + for the purposes of vector representation and + storage. A Corpus can have a maximum of 1 million + Chunks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, chunk]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.CreateChunkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.CreateChunkRequest): + request = retriever_service.CreateChunkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if chunk is not None: + request.chunk = chunk + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_chunk] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
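+ # Illustrative note: the parent document name can be built with the class + # path helper, e.g. RetrieverServiceClient.document_path("my-corpus-123", + # "the-doc-abc") yields "corpora/my-corpus-123/documents/the-doc-abc" + # (placeholder IDs).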
+ return response + + def batch_create_chunks( + self, + request: Optional[ + Union[retriever_service.BatchCreateChunksRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.BatchCreateChunksResponse: + r"""Batch create ``Chunk``\ s. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_batch_create_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.CreateChunkRequest() + requests.parent = "parent_value" + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchCreateChunksRequest( + requests=requests, + ) + + # Make the request + response = client.batch_create_chunks(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.BatchCreateChunksRequest, dict]): + The request object. Request to batch create ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchCreateChunksResponse: + Response from BatchCreateChunks containing a list of + created Chunks. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.BatchCreateChunksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.BatchCreateChunksRequest): + request = retriever_service.BatchCreateChunksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_chunks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_chunk( + self, + request: Optional[Union[retriever_service.GetChunkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Gets information about a specific ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_get_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetChunkRequest( + name="name_value", + ) + + # Make the request + response = client.get_chunk(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GetChunkRequest, dict]): + The request object. Request for getting information about a specific + ``Chunk``. + name (str): + Required. The name of the ``Chunk`` to retrieve. + Example: + ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Chunk: + A Chunk is a subpart of a Document that is treated as an independent unit + for the purposes of vector representation and + storage. A Corpus can have a maximum of 1 million + Chunks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.GetChunkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.GetChunkRequest): + request = retriever_service.GetChunkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_chunk] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_chunk( + self, + request: Optional[Union[retriever_service.UpdateChunkRequest, dict]] = None, + *, + chunk: Optional[retriever.Chunk] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Updates a ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_update_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.UpdateChunkRequest( + chunk=chunk, + ) + + # Make the request + response = client.update_chunk(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.UpdateChunkRequest, dict]): + The request object. Request to update a ``Chunk``. + chunk (google.ai.generativelanguage_v1beta.types.Chunk): + Required. The ``Chunk`` to update. + This corresponds to the ``chunk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Currently, this + only supports updating ``custom_metadata`` and ``data``. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.Chunk: + A Chunk is a subpart of a Document that is treated as an independent unit + for the purposes of vector representation and + storage. A Corpus can have a maximum of 1 million + Chunks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([chunk, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.UpdateChunkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.UpdateChunkRequest): + request = retriever_service.UpdateChunkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if chunk is not None: + request.chunk = chunk + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_chunk] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("chunk.name", request.chunk.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
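The ``update_chunk`` method defined above (its ``return`` follows just below) also accepts the flattened ``chunk`` and ``update_mask`` arguments instead of a request object. A minimal sketch, assuming application default credentials, an illustrative chunk resource name, and that ``Chunk``/``ChunkData`` are exported with the other generated types:

    from google.protobuf import field_mask_pb2

    from google.ai import generativelanguage_v1beta

    client = generativelanguage_v1beta.RetrieverServiceClient()

    chunk = generativelanguage_v1beta.Chunk(
        name="corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk",
        data=generativelanguage_v1beta.ChunkData(string_value="updated chunk text"),
    )

    # Per the docstring above, only `custom_metadata` and `data` may be updated.
    updated = client.update_chunk(
        chunk=chunk,
        update_mask=field_mask_pb2.FieldMask(paths=["data"]),
    )
    print(updated.name)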
+ return response + + def batch_update_chunks( + self, + request: Optional[ + Union[retriever_service.BatchUpdateChunksRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.BatchUpdateChunksResponse: + r"""Batch update ``Chunk``\ s. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_batch_update_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.UpdateChunkRequest() + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchUpdateChunksRequest( + requests=requests, + ) + + # Make the request + response = client.batch_update_chunks(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.BatchUpdateChunksRequest, dict]): + The request object. Request to batch update ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchUpdateChunksResponse: + Response from BatchUpdateChunks containing a list of + updated Chunks. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.BatchUpdateChunksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.BatchUpdateChunksRequest): + request = retriever_service.BatchUpdateChunksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_update_chunks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_chunk( + self, + request: Optional[Union[retriever_service.DeleteChunkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a ``Chunk``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_delete_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteChunkRequest( + name="name_value", + ) + + # Make the request + client.delete_chunk(request=request) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.DeleteChunkRequest, dict]): + The request object. Request to delete a ``Chunk``. + name (str): + Required. The resource name of the ``Chunk`` to delete. + Example: + ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.DeleteChunkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.DeleteChunkRequest): + request = retriever_service.DeleteChunkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_chunk] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def batch_delete_chunks( + self, + request: Optional[ + Union[retriever_service.BatchDeleteChunksRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Batch delete ``Chunk``\ s. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_batch_delete_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.DeleteChunkRequest() + requests.name = "name_value" + + request = generativelanguage_v1beta.BatchDeleteChunksRequest( + requests=requests, + ) + + # Make the request + client.batch_delete_chunks(request=request) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.BatchDeleteChunksRequest, dict]): + The request object. Request to batch delete ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.BatchDeleteChunksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.BatchDeleteChunksRequest): + request = retriever_service.BatchDeleteChunksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_delete_chunks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_chunks( + self, + request: Optional[Union[retriever_service.ListChunksRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListChunksPager: + r"""Lists all ``Chunk``\ s in a ``Document``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_list_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListChunksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_chunks(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.ListChunksRequest, dict]): + The request object. Request for listing ``Chunk``\ s. + parent (str): + Required. The name of the ``Document`` containing + ``Chunk``\ s. 
Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListChunksPager: + Response from ListChunks containing a paginated list of Chunks. + The Chunks are sorted by ascending chunk.create_time. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a retriever_service.ListChunksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, retriever_service.ListChunksRequest): + request = retriever_service.ListChunksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_chunks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListChunksPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "RetrieverServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
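The ``ListChunksPager`` returned by ``list_chunks`` above resolves follow-up pages lazily. A minimal sketch, assuming application default credentials and the illustrative document name from the docstring:

    from google.ai import generativelanguage_v1beta

    client = generativelanguage_v1beta.RetrieverServiceClient()
    parent = "corpora/my-corpus-123/documents/the-doc-abc"

    # Chunk-by-chunk iteration; additional ListChunks calls happen transparently.
    for chunk in client.list_chunks(parent=parent):
        print(chunk.name)

    # Page-by-page iteration via the pager's `pages` property.
    for page in client.list_chunks(parent=parent).pages:
        print(len(page.chunks), page.next_page_token)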
+ """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("RetrieverServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/pagers.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/pagers.py new file mode 100644 index 000000000000..98410f32afbf --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/pagers.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Iterator, + Optional, + Sequence, + Tuple, +) + +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + + +class ListCorporaPager: + """A pager for iterating through ``list_corpora`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListCorporaResponse` object, and + provides an ``__iter__`` method to iterate through its + ``corpora`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListCorpora`` requests and continue to iterate + through the ``corpora`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListCorporaResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., retriever_service.ListCorporaResponse], + request: retriever_service.ListCorporaRequest, + response: retriever_service.ListCorporaResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListCorporaRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListCorporaResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = retriever_service.ListCorporaRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[retriever_service.ListCorporaResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[retriever.Corpus]: + for page in self.pages: + yield from page.corpora + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListCorporaAsyncPager: + """A pager for iterating through ``list_corpora`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListCorporaResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``corpora`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListCorpora`` requests and continue to iterate + through the ``corpora`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListCorporaResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[retriever_service.ListCorporaResponse]], + request: retriever_service.ListCorporaRequest, + response: retriever_service.ListCorporaResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListCorporaRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListCorporaResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = retriever_service.ListCorporaRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[retriever_service.ListCorporaResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[retriever.Corpus]: + async def async_generator(): + async for page in self.pages: + for response in page.corpora: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDocumentsPager: + """A pager for iterating through ``list_documents`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListDocumentsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``documents`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDocuments`` requests and continue to iterate + through the ``documents`` field on the + corresponding responses. 
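The async pagers defined here are consumed with ``async for``. A minimal sketch, assuming application default credentials and the companion ``RetrieverServiceAsyncClient`` generated alongside this client (the async client itself is not shown in this hunk):

    import asyncio

    from google.ai import generativelanguage_v1beta

    async def main():
        # Assumed name of the generated async client for this service.
        client = generativelanguage_v1beta.RetrieverServiceAsyncClient()

        pager = await client.list_corpora()
        # __aiter__ awaits follow-up ListCorpora calls as pages are exhausted.
        async for corpus in pager:
            print(corpus.name)

    asyncio.run(main())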
+ + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListDocumentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., retriever_service.ListDocumentsResponse], + request: retriever_service.ListDocumentsRequest, + response: retriever_service.ListDocumentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListDocumentsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListDocumentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = retriever_service.ListDocumentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[retriever_service.ListDocumentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[retriever.Document]: + for page in self.pages: + yield from page.documents + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDocumentsAsyncPager: + """A pager for iterating through ``list_documents`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListDocumentsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``documents`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDocuments`` requests and continue to iterate + through the ``documents`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListDocumentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[retriever_service.ListDocumentsResponse]], + request: retriever_service.ListDocumentsRequest, + response: retriever_service.ListDocumentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListDocumentsRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListDocumentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = retriever_service.ListDocumentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[retriever_service.ListDocumentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[retriever.Document]: + async def async_generator(): + async for page in self.pages: + for response in page.documents: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListChunksPager: + """A pager for iterating through ``list_chunks`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListChunksResponse` object, and + provides an ``__iter__`` method to iterate through its + ``chunks`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListChunks`` requests and continue to iterate + through the ``chunks`` field on the + corresponding responses. + + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListChunksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., retriever_service.ListChunksResponse], + request: retriever_service.ListChunksRequest, + response: retriever_service.ListChunksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListChunksRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListChunksResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = retriever_service.ListChunksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[retriever_service.ListChunksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[retriever.Chunk]: + for page in self.pages: + yield from page.chunks + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListChunksAsyncPager: + """A pager for iterating through ``list_chunks`` requests. + + This class thinly wraps an initial + :class:`google.ai.generativelanguage_v1beta.types.ListChunksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``chunks`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListChunks`` requests and continue to iterate + through the ``chunks`` field on the + corresponding responses. 
+ + All the usual :class:`google.ai.generativelanguage_v1beta.types.ListChunksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[retriever_service.ListChunksResponse]], + request: retriever_service.ListChunksRequest, + response: retriever_service.ListChunksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.ai.generativelanguage_v1beta.types.ListChunksRequest): + The initial request object. + response (google.ai.generativelanguage_v1beta.types.ListChunksResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = retriever_service.ListChunksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[retriever_service.ListChunksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[retriever.Chunk]: + async def async_generator(): + async for page in self.pages: + for response in page.chunks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/__init__.py new file mode 100644 index 000000000000..7f8233ec6de5 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RetrieverServiceTransport +from .grpc import RetrieverServiceGrpcTransport +from .grpc_asyncio import RetrieverServiceGrpcAsyncIOTransport +from .rest import RetrieverServiceRestInterceptor, RetrieverServiceRestTransport + +# Compile a registry of transports. 
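The registry compiled just below maps short transport names to transport classes; the same strings are accepted by the ``transport`` argument of the client constructor, a standard GAPIC parameter assumed here rather than shown in this hunk. A minimal sketch:

    from google.ai import generativelanguage_v1beta

    # REST over HTTP/1.1 instead of the default gRPC transport.
    rest_client = generativelanguage_v1beta.RetrieverServiceClient(transport="rest")

    # Explicit gRPC (the default when grpcio is available).
    grpc_client = generativelanguage_v1beta.RetrieverServiceClient(transport="grpc")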
+_transport_registry = OrderedDict() # type: Dict[str, Type[RetrieverServiceTransport]] +_transport_registry["grpc"] = RetrieverServiceGrpcTransport +_transport_registry["grpc_asyncio"] = RetrieverServiceGrpcAsyncIOTransport +_transport_registry["rest"] = RetrieverServiceRestTransport + +__all__ = ( + "RetrieverServiceTransport", + "RetrieverServiceGrpcTransport", + "RetrieverServiceGrpcAsyncIOTransport", + "RetrieverServiceRestTransport", + "RetrieverServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/base.py new file mode 100644 index 000000000000..8d0d0c05b46f --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/base.py @@ -0,0 +1,588 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class RetrieverServiceTransport(abc.ABC): + """Abstract transport class for RetrieverService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
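The constructor above falls back to ``google.auth.default()`` when no credentials are supplied; explicit service-account credentials can be passed through the client instead (the wrapped-method table follows below). A minimal sketch, assuming a service-account key file (path is illustrative) and the standard ``credentials`` parameter on the generated client:

    from google.oauth2 import service_account

    from google.ai import generativelanguage_v1beta

    creds = service_account.Credentials.from_service_account_file(
        "service-account.json"
    )
    client = generativelanguage_v1beta.RetrieverServiceClient(credentials=creds)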
+ self._wrapped_methods = { + self.create_corpus: gapic_v1.method.wrap_method( + self.create_corpus, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_corpus: gapic_v1.method.wrap_method( + self.get_corpus, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_corpus: gapic_v1.method.wrap_method( + self.update_corpus, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_corpus: gapic_v1.method.wrap_method( + self.delete_corpus, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_corpora: gapic_v1.method.wrap_method( + self.list_corpora, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.query_corpus: gapic_v1.method.wrap_method( + self.query_corpus, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_document: gapic_v1.method.wrap_method( + self.create_document, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_document: gapic_v1.method.wrap_method( + self.get_document, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_document: gapic_v1.method.wrap_method( + self.update_document, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_document: gapic_v1.method.wrap_method( + self.delete_document, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_documents: gapic_v1.method.wrap_method( + self.list_documents, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.query_document: gapic_v1.method.wrap_method( + self.query_document, + 
default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_chunk: gapic_v1.method.wrap_method( + self.create_chunk, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.batch_create_chunks: gapic_v1.method.wrap_method( + self.batch_create_chunks, + default_timeout=None, + client_info=client_info, + ), + self.get_chunk: gapic_v1.method.wrap_method( + self.get_chunk, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_chunk: gapic_v1.method.wrap_method( + self.update_chunk, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.batch_update_chunks: gapic_v1.method.wrap_method( + self.batch_update_chunks, + default_timeout=None, + client_info=client_info, + ), + self.delete_chunk: gapic_v1.method.wrap_method( + self.delete_chunk, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.batch_delete_chunks: gapic_v1.method.wrap_method( + self.batch_delete_chunks, + default_timeout=None, + client_info=client_info, + ), + self.list_chunks: gapic_v1.method.wrap_method( + self.list_chunks, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
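The table above wires each RPC to a default ``Retry`` (1.0 s initial backoff, multiplier 1.3 up to 10.0 s, retrying ``ServiceUnavailable``, 60 s deadline). Callers can override this per call through the ``retry`` and ``timeout`` arguments every generated method accepts. A minimal sketch using ``get_chunk`` with its example resource name and a custom policy that also retries ``ResourceExhausted``:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries

    from google.ai import generativelanguage_v1beta

    client = generativelanguage_v1beta.RetrieverServiceClient()

    custom_retry = retries.Retry(
        initial=0.5,
        maximum=5.0,
        multiplier=2.0,
        deadline=30.0,
        predicate=retries.if_exception_type(
            core_exceptions.ServiceUnavailable,
            core_exceptions.ResourceExhausted,
        ),
    )

    chunk = client.get_chunk(
        name="corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk",
        retry=custom_retry,
        timeout=30.0,
    )
    print(chunk.name)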
+ """ + raise NotImplementedError() + + @property + def create_corpus( + self, + ) -> Callable[ + [retriever_service.CreateCorpusRequest], + Union[retriever.Corpus, Awaitable[retriever.Corpus]], + ]: + raise NotImplementedError() + + @property + def get_corpus( + self, + ) -> Callable[ + [retriever_service.GetCorpusRequest], + Union[retriever.Corpus, Awaitable[retriever.Corpus]], + ]: + raise NotImplementedError() + + @property + def update_corpus( + self, + ) -> Callable[ + [retriever_service.UpdateCorpusRequest], + Union[retriever.Corpus, Awaitable[retriever.Corpus]], + ]: + raise NotImplementedError() + + @property + def delete_corpus( + self, + ) -> Callable[ + [retriever_service.DeleteCorpusRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def list_corpora( + self, + ) -> Callable[ + [retriever_service.ListCorporaRequest], + Union[ + retriever_service.ListCorporaResponse, + Awaitable[retriever_service.ListCorporaResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_corpus( + self, + ) -> Callable[ + [retriever_service.QueryCorpusRequest], + Union[ + retriever_service.QueryCorpusResponse, + Awaitable[retriever_service.QueryCorpusResponse], + ], + ]: + raise NotImplementedError() + + @property + def create_document( + self, + ) -> Callable[ + [retriever_service.CreateDocumentRequest], + Union[retriever.Document, Awaitable[retriever.Document]], + ]: + raise NotImplementedError() + + @property + def get_document( + self, + ) -> Callable[ + [retriever_service.GetDocumentRequest], + Union[retriever.Document, Awaitable[retriever.Document]], + ]: + raise NotImplementedError() + + @property + def update_document( + self, + ) -> Callable[ + [retriever_service.UpdateDocumentRequest], + Union[retriever.Document, Awaitable[retriever.Document]], + ]: + raise NotImplementedError() + + @property + def delete_document( + self, + ) -> Callable[ + [retriever_service.DeleteDocumentRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def list_documents( + self, + ) -> Callable[ + [retriever_service.ListDocumentsRequest], + Union[ + retriever_service.ListDocumentsResponse, + Awaitable[retriever_service.ListDocumentsResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_document( + self, + ) -> Callable[ + [retriever_service.QueryDocumentRequest], + Union[ + retriever_service.QueryDocumentResponse, + Awaitable[retriever_service.QueryDocumentResponse], + ], + ]: + raise NotImplementedError() + + @property + def create_chunk( + self, + ) -> Callable[ + [retriever_service.CreateChunkRequest], + Union[retriever.Chunk, Awaitable[retriever.Chunk]], + ]: + raise NotImplementedError() + + @property + def batch_create_chunks( + self, + ) -> Callable[ + [retriever_service.BatchCreateChunksRequest], + Union[ + retriever_service.BatchCreateChunksResponse, + Awaitable[retriever_service.BatchCreateChunksResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_chunk( + self, + ) -> Callable[ + [retriever_service.GetChunkRequest], + Union[retriever.Chunk, Awaitable[retriever.Chunk]], + ]: + raise NotImplementedError() + + @property + def update_chunk( + self, + ) -> Callable[ + [retriever_service.UpdateChunkRequest], + Union[retriever.Chunk, Awaitable[retriever.Chunk]], + ]: + raise NotImplementedError() + + @property + def batch_update_chunks( + self, + ) -> Callable[ + [retriever_service.BatchUpdateChunksRequest], + Union[ + 
retriever_service.BatchUpdateChunksResponse, + Awaitable[retriever_service.BatchUpdateChunksResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_chunk( + self, + ) -> Callable[ + [retriever_service.DeleteChunkRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def batch_delete_chunks( + self, + ) -> Callable[ + [retriever_service.BatchDeleteChunksRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def list_chunks( + self, + ) -> Callable[ + [retriever_service.ListChunksRequest], + Union[ + retriever_service.ListChunksResponse, + Awaitable[retriever_service.ListChunksResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("RetrieverServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc.py new file mode 100644 index 000000000000..456aab78ac6b --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc.py @@ -0,0 +1,780 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + +from .base import DEFAULT_CLIENT_INFO, RetrieverServiceTransport + + +class RetrieverServiceGrpcTransport(RetrieverServiceTransport): + """gRPC backend transport for RetrieverService. + + An API for semantic search over a corpus of user uploaded + content. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
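The parameters above allow handing the transport a pre-built channel. A minimal sketch combining ``create_channel`` (defined further below) with the transport and client, assuming application default credentials and the standard ``transport`` keyword on the generated client:

    from google.ai import generativelanguage_v1beta
    from google.ai.generativelanguage_v1beta.services.retriever_service.transports import (
        RetrieverServiceGrpcTransport,
    )

    # Build a channel once, then reuse it for the transport and the client.
    channel = RetrieverServiceGrpcTransport.create_channel(
        "generativelanguage.googleapis.com",
    )
    transport = RetrieverServiceGrpcTransport(channel=channel)
    client = generativelanguage_v1beta.RetrieverServiceClient(transport=transport)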
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def create_corpus( + self, + ) -> Callable[[retriever_service.CreateCorpusRequest], retriever.Corpus]: + r"""Return a callable for the create corpus method over gRPC. + + Creates an empty ``Corpus``. + + Returns: + Callable[[~.CreateCorpusRequest], + ~.Corpus]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_corpus" not in self._stubs: + self._stubs["create_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/CreateCorpus", + request_serializer=retriever_service.CreateCorpusRequest.serialize, + response_deserializer=retriever.Corpus.deserialize, + ) + return self._stubs["create_corpus"] + + @property + def get_corpus( + self, + ) -> Callable[[retriever_service.GetCorpusRequest], retriever.Corpus]: + r"""Return a callable for the get corpus method over gRPC. + + Gets information about a specific ``Corpus``. + + Returns: + Callable[[~.GetCorpusRequest], + ~.Corpus]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_corpus" not in self._stubs: + self._stubs["get_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/GetCorpus", + request_serializer=retriever_service.GetCorpusRequest.serialize, + response_deserializer=retriever.Corpus.deserialize, + ) + return self._stubs["get_corpus"] + + @property + def update_corpus( + self, + ) -> Callable[[retriever_service.UpdateCorpusRequest], retriever.Corpus]: + r"""Return a callable for the update corpus method over gRPC. + + Updates a ``Corpus``. + + Returns: + Callable[[~.UpdateCorpusRequest], + ~.Corpus]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_corpus" not in self._stubs: + self._stubs["update_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/UpdateCorpus", + request_serializer=retriever_service.UpdateCorpusRequest.serialize, + response_deserializer=retriever.Corpus.deserialize, + ) + return self._stubs["update_corpus"] + + @property + def delete_corpus( + self, + ) -> Callable[[retriever_service.DeleteCorpusRequest], empty_pb2.Empty]: + r"""Return a callable for the delete corpus method over gRPC. + + Deletes a ``Corpus``. + + Returns: + Callable[[~.DeleteCorpusRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_corpus" not in self._stubs: + self._stubs["delete_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/DeleteCorpus", + request_serializer=retriever_service.DeleteCorpusRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_corpus"] + + @property + def list_corpora( + self, + ) -> Callable[ + [retriever_service.ListCorporaRequest], retriever_service.ListCorporaResponse + ]: + r"""Return a callable for the list corpora method over gRPC. + + Lists all ``Corpora`` owned by the user. + + Returns: + Callable[[~.ListCorporaRequest], + ~.ListCorporaResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_corpora" not in self._stubs: + self._stubs["list_corpora"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/ListCorpora", + request_serializer=retriever_service.ListCorporaRequest.serialize, + response_deserializer=retriever_service.ListCorporaResponse.deserialize, + ) + return self._stubs["list_corpora"] + + @property + def query_corpus( + self, + ) -> Callable[ + [retriever_service.QueryCorpusRequest], retriever_service.QueryCorpusResponse + ]: + r"""Return a callable for the query corpus method over gRPC. + + Performs semantic search over a ``Corpus``. + + Returns: + Callable[[~.QueryCorpusRequest], + ~.QueryCorpusResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_corpus" not in self._stubs: + self._stubs["query_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/QueryCorpus", + request_serializer=retriever_service.QueryCorpusRequest.serialize, + response_deserializer=retriever_service.QueryCorpusResponse.deserialize, + ) + return self._stubs["query_corpus"] + + @property + def create_document( + self, + ) -> Callable[[retriever_service.CreateDocumentRequest], retriever.Document]: + r"""Return a callable for the create document method over gRPC. + + Creates an empty ``Document``. + + Returns: + Callable[[~.CreateDocumentRequest], + ~.Document]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_document" not in self._stubs: + self._stubs["create_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/CreateDocument", + request_serializer=retriever_service.CreateDocumentRequest.serialize, + response_deserializer=retriever.Document.deserialize, + ) + return self._stubs["create_document"] + + @property + def get_document( + self, + ) -> Callable[[retriever_service.GetDocumentRequest], retriever.Document]: + r"""Return a callable for the get document method over gRPC. + + Gets information about a specific ``Document``. 
+ + Returns: + Callable[[~.GetDocumentRequest], + ~.Document]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_document" not in self._stubs: + self._stubs["get_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/GetDocument", + request_serializer=retriever_service.GetDocumentRequest.serialize, + response_deserializer=retriever.Document.deserialize, + ) + return self._stubs["get_document"] + + @property + def update_document( + self, + ) -> Callable[[retriever_service.UpdateDocumentRequest], retriever.Document]: + r"""Return a callable for the update document method over gRPC. + + Updates a ``Document``. + + Returns: + Callable[[~.UpdateDocumentRequest], + ~.Document]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_document" not in self._stubs: + self._stubs["update_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/UpdateDocument", + request_serializer=retriever_service.UpdateDocumentRequest.serialize, + response_deserializer=retriever.Document.deserialize, + ) + return self._stubs["update_document"] + + @property + def delete_document( + self, + ) -> Callable[[retriever_service.DeleteDocumentRequest], empty_pb2.Empty]: + r"""Return a callable for the delete document method over gRPC. + + Deletes a ``Document``. + + Returns: + Callable[[~.DeleteDocumentRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_document" not in self._stubs: + self._stubs["delete_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/DeleteDocument", + request_serializer=retriever_service.DeleteDocumentRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_document"] + + @property + def list_documents( + self, + ) -> Callable[ + [retriever_service.ListDocumentsRequest], + retriever_service.ListDocumentsResponse, + ]: + r"""Return a callable for the list documents method over gRPC. + + Lists all ``Document``\ s in a ``Corpus``. + + Returns: + Callable[[~.ListDocumentsRequest], + ~.ListDocumentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_documents" not in self._stubs: + self._stubs["list_documents"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/ListDocuments", + request_serializer=retriever_service.ListDocumentsRequest.serialize, + response_deserializer=retriever_service.ListDocumentsResponse.deserialize, + ) + return self._stubs["list_documents"] + + @property + def query_document( + self, + ) -> Callable[ + [retriever_service.QueryDocumentRequest], + retriever_service.QueryDocumentResponse, + ]: + r"""Return a callable for the query document method over gRPC. + + Performs semantic search over a ``Document``. + + Returns: + Callable[[~.QueryDocumentRequest], + ~.QueryDocumentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_document" not in self._stubs: + self._stubs["query_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/QueryDocument", + request_serializer=retriever_service.QueryDocumentRequest.serialize, + response_deserializer=retriever_service.QueryDocumentResponse.deserialize, + ) + return self._stubs["query_document"] + + @property + def create_chunk( + self, + ) -> Callable[[retriever_service.CreateChunkRequest], retriever.Chunk]: + r"""Return a callable for the create chunk method over gRPC. + + Creates a ``Chunk``. + + Returns: + Callable[[~.CreateChunkRequest], + ~.Chunk]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_chunk" not in self._stubs: + self._stubs["create_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/CreateChunk", + request_serializer=retriever_service.CreateChunkRequest.serialize, + response_deserializer=retriever.Chunk.deserialize, + ) + return self._stubs["create_chunk"] + + @property + def batch_create_chunks( + self, + ) -> Callable[ + [retriever_service.BatchCreateChunksRequest], + retriever_service.BatchCreateChunksResponse, + ]: + r"""Return a callable for the batch create chunks method over gRPC. + + Batch create ``Chunk``\ s. + + Returns: + Callable[[~.BatchCreateChunksRequest], + ~.BatchCreateChunksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_chunks" not in self._stubs: + self._stubs["batch_create_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/BatchCreateChunks", + request_serializer=retriever_service.BatchCreateChunksRequest.serialize, + response_deserializer=retriever_service.BatchCreateChunksResponse.deserialize, + ) + return self._stubs["batch_create_chunks"] + + @property + def get_chunk( + self, + ) -> Callable[[retriever_service.GetChunkRequest], retriever.Chunk]: + r"""Return a callable for the get chunk method over gRPC. + + Gets information about a specific ``Chunk``. 
+ + Returns: + Callable[[~.GetChunkRequest], + ~.Chunk]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_chunk" not in self._stubs: + self._stubs["get_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/GetChunk", + request_serializer=retriever_service.GetChunkRequest.serialize, + response_deserializer=retriever.Chunk.deserialize, + ) + return self._stubs["get_chunk"] + + @property + def update_chunk( + self, + ) -> Callable[[retriever_service.UpdateChunkRequest], retriever.Chunk]: + r"""Return a callable for the update chunk method over gRPC. + + Updates a ``Chunk``. + + Returns: + Callable[[~.UpdateChunkRequest], + ~.Chunk]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_chunk" not in self._stubs: + self._stubs["update_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/UpdateChunk", + request_serializer=retriever_service.UpdateChunkRequest.serialize, + response_deserializer=retriever.Chunk.deserialize, + ) + return self._stubs["update_chunk"] + + @property + def batch_update_chunks( + self, + ) -> Callable[ + [retriever_service.BatchUpdateChunksRequest], + retriever_service.BatchUpdateChunksResponse, + ]: + r"""Return a callable for the batch update chunks method over gRPC. + + Batch update ``Chunk``\ s. + + Returns: + Callable[[~.BatchUpdateChunksRequest], + ~.BatchUpdateChunksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_update_chunks" not in self._stubs: + self._stubs["batch_update_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/BatchUpdateChunks", + request_serializer=retriever_service.BatchUpdateChunksRequest.serialize, + response_deserializer=retriever_service.BatchUpdateChunksResponse.deserialize, + ) + return self._stubs["batch_update_chunks"] + + @property + def delete_chunk( + self, + ) -> Callable[[retriever_service.DeleteChunkRequest], empty_pb2.Empty]: + r"""Return a callable for the delete chunk method over gRPC. + + Deletes a ``Chunk``. + + Returns: + Callable[[~.DeleteChunkRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_chunk" not in self._stubs: + self._stubs["delete_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/DeleteChunk", + request_serializer=retriever_service.DeleteChunkRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_chunk"] + + @property + def batch_delete_chunks( + self, + ) -> Callable[[retriever_service.BatchDeleteChunksRequest], empty_pb2.Empty]: + r"""Return a callable for the batch delete chunks method over gRPC. + + Batch delete ``Chunk``\ s. + + Returns: + Callable[[~.BatchDeleteChunksRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_delete_chunks" not in self._stubs: + self._stubs["batch_delete_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/BatchDeleteChunks", + request_serializer=retriever_service.BatchDeleteChunksRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["batch_delete_chunks"] + + @property + def list_chunks( + self, + ) -> Callable[ + [retriever_service.ListChunksRequest], retriever_service.ListChunksResponse + ]: + r"""Return a callable for the list chunks method over gRPC. + + Lists all ``Chunk``\ s in a ``Document``. + + Returns: + Callable[[~.ListChunksRequest], + ~.ListChunksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_chunks" not in self._stubs: + self._stubs["list_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/ListChunks", + request_serializer=retriever_service.ListChunksRequest.serialize, + response_deserializer=retriever_service.ListChunksResponse.deserialize, + ) + return self._stubs["list_chunks"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("RetrieverServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..f5ddf0b50dc0 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/grpc_asyncio.py @@ -0,0 +1,792 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + +from .base import DEFAULT_CLIENT_INFO, RetrieverServiceTransport +from .grpc import RetrieverServiceGrpcTransport + + +class RetrieverServiceGrpcAsyncIOTransport(RetrieverServiceTransport): + """gRPC AsyncIO backend transport for RetrieverService. + + An API for semantic search over a corpus of user uploaded + content. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_corpus( + self, + ) -> Callable[[retriever_service.CreateCorpusRequest], Awaitable[retriever.Corpus]]: + r"""Return a callable for the create corpus method over gRPC. + + Creates an empty ``Corpus``. + + Returns: + Callable[[~.CreateCorpusRequest], + Awaitable[~.Corpus]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_corpus" not in self._stubs: + self._stubs["create_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/CreateCorpus", + request_serializer=retriever_service.CreateCorpusRequest.serialize, + response_deserializer=retriever.Corpus.deserialize, + ) + return self._stubs["create_corpus"] + + @property + def get_corpus( + self, + ) -> Callable[[retriever_service.GetCorpusRequest], Awaitable[retriever.Corpus]]: + r"""Return a callable for the get corpus method over gRPC. + + Gets information about a specific ``Corpus``. 
+ + Returns: + Callable[[~.GetCorpusRequest], + Awaitable[~.Corpus]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_corpus" not in self._stubs: + self._stubs["get_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/GetCorpus", + request_serializer=retriever_service.GetCorpusRequest.serialize, + response_deserializer=retriever.Corpus.deserialize, + ) + return self._stubs["get_corpus"] + + @property + def update_corpus( + self, + ) -> Callable[[retriever_service.UpdateCorpusRequest], Awaitable[retriever.Corpus]]: + r"""Return a callable for the update corpus method over gRPC. + + Updates a ``Corpus``. + + Returns: + Callable[[~.UpdateCorpusRequest], + Awaitable[~.Corpus]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_corpus" not in self._stubs: + self._stubs["update_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/UpdateCorpus", + request_serializer=retriever_service.UpdateCorpusRequest.serialize, + response_deserializer=retriever.Corpus.deserialize, + ) + return self._stubs["update_corpus"] + + @property + def delete_corpus( + self, + ) -> Callable[[retriever_service.DeleteCorpusRequest], Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete corpus method over gRPC. + + Deletes a ``Corpus``. + + Returns: + Callable[[~.DeleteCorpusRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_corpus" not in self._stubs: + self._stubs["delete_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/DeleteCorpus", + request_serializer=retriever_service.DeleteCorpusRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_corpus"] + + @property + def list_corpora( + self, + ) -> Callable[ + [retriever_service.ListCorporaRequest], + Awaitable[retriever_service.ListCorporaResponse], + ]: + r"""Return a callable for the list corpora method over gRPC. + + Lists all ``Corpora`` owned by the user. + + Returns: + Callable[[~.ListCorporaRequest], + Awaitable[~.ListCorporaResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_corpora" not in self._stubs: + self._stubs["list_corpora"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/ListCorpora", + request_serializer=retriever_service.ListCorporaRequest.serialize, + response_deserializer=retriever_service.ListCorporaResponse.deserialize, + ) + return self._stubs["list_corpora"] + + @property + def query_corpus( + self, + ) -> Callable[ + [retriever_service.QueryCorpusRequest], + Awaitable[retriever_service.QueryCorpusResponse], + ]: + r"""Return a callable for the query corpus method over gRPC. + + Performs semantic search over a ``Corpus``. + + Returns: + Callable[[~.QueryCorpusRequest], + Awaitable[~.QueryCorpusResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_corpus" not in self._stubs: + self._stubs["query_corpus"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/QueryCorpus", + request_serializer=retriever_service.QueryCorpusRequest.serialize, + response_deserializer=retriever_service.QueryCorpusResponse.deserialize, + ) + return self._stubs["query_corpus"] + + @property + def create_document( + self, + ) -> Callable[ + [retriever_service.CreateDocumentRequest], Awaitable[retriever.Document] + ]: + r"""Return a callable for the create document method over gRPC. + + Creates an empty ``Document``. + + Returns: + Callable[[~.CreateDocumentRequest], + Awaitable[~.Document]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_document" not in self._stubs: + self._stubs["create_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/CreateDocument", + request_serializer=retriever_service.CreateDocumentRequest.serialize, + response_deserializer=retriever.Document.deserialize, + ) + return self._stubs["create_document"] + + @property + def get_document( + self, + ) -> Callable[ + [retriever_service.GetDocumentRequest], Awaitable[retriever.Document] + ]: + r"""Return a callable for the get document method over gRPC. + + Gets information about a specific ``Document``. + + Returns: + Callable[[~.GetDocumentRequest], + Awaitable[~.Document]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_document" not in self._stubs: + self._stubs["get_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/GetDocument", + request_serializer=retriever_service.GetDocumentRequest.serialize, + response_deserializer=retriever.Document.deserialize, + ) + return self._stubs["get_document"] + + @property + def update_document( + self, + ) -> Callable[ + [retriever_service.UpdateDocumentRequest], Awaitable[retriever.Document] + ]: + r"""Return a callable for the update document method over gRPC. + + Updates a ``Document``. 
+ + Returns: + Callable[[~.UpdateDocumentRequest], + Awaitable[~.Document]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_document" not in self._stubs: + self._stubs["update_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/UpdateDocument", + request_serializer=retriever_service.UpdateDocumentRequest.serialize, + response_deserializer=retriever.Document.deserialize, + ) + return self._stubs["update_document"] + + @property + def delete_document( + self, + ) -> Callable[ + [retriever_service.DeleteDocumentRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete document method over gRPC. + + Deletes a ``Document``. + + Returns: + Callable[[~.DeleteDocumentRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_document" not in self._stubs: + self._stubs["delete_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/DeleteDocument", + request_serializer=retriever_service.DeleteDocumentRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_document"] + + @property + def list_documents( + self, + ) -> Callable[ + [retriever_service.ListDocumentsRequest], + Awaitable[retriever_service.ListDocumentsResponse], + ]: + r"""Return a callable for the list documents method over gRPC. + + Lists all ``Document``\ s in a ``Corpus``. + + Returns: + Callable[[~.ListDocumentsRequest], + Awaitable[~.ListDocumentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_documents" not in self._stubs: + self._stubs["list_documents"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/ListDocuments", + request_serializer=retriever_service.ListDocumentsRequest.serialize, + response_deserializer=retriever_service.ListDocumentsResponse.deserialize, + ) + return self._stubs["list_documents"] + + @property + def query_document( + self, + ) -> Callable[ + [retriever_service.QueryDocumentRequest], + Awaitable[retriever_service.QueryDocumentResponse], + ]: + r"""Return a callable for the query document method over gRPC. + + Performs semantic search over a ``Document``. + + Returns: + Callable[[~.QueryDocumentRequest], + Awaitable[~.QueryDocumentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_document" not in self._stubs: + self._stubs["query_document"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/QueryDocument", + request_serializer=retriever_service.QueryDocumentRequest.serialize, + response_deserializer=retriever_service.QueryDocumentResponse.deserialize, + ) + return self._stubs["query_document"] + + @property + def create_chunk( + self, + ) -> Callable[[retriever_service.CreateChunkRequest], Awaitable[retriever.Chunk]]: + r"""Return a callable for the create chunk method over gRPC. + + Creates a ``Chunk``. + + Returns: + Callable[[~.CreateChunkRequest], + Awaitable[~.Chunk]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_chunk" not in self._stubs: + self._stubs["create_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/CreateChunk", + request_serializer=retriever_service.CreateChunkRequest.serialize, + response_deserializer=retriever.Chunk.deserialize, + ) + return self._stubs["create_chunk"] + + @property + def batch_create_chunks( + self, + ) -> Callable[ + [retriever_service.BatchCreateChunksRequest], + Awaitable[retriever_service.BatchCreateChunksResponse], + ]: + r"""Return a callable for the batch create chunks method over gRPC. + + Batch create ``Chunk``\ s. + + Returns: + Callable[[~.BatchCreateChunksRequest], + Awaitable[~.BatchCreateChunksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_chunks" not in self._stubs: + self._stubs["batch_create_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/BatchCreateChunks", + request_serializer=retriever_service.BatchCreateChunksRequest.serialize, + response_deserializer=retriever_service.BatchCreateChunksResponse.deserialize, + ) + return self._stubs["batch_create_chunks"] + + @property + def get_chunk( + self, + ) -> Callable[[retriever_service.GetChunkRequest], Awaitable[retriever.Chunk]]: + r"""Return a callable for the get chunk method over gRPC. + + Gets information about a specific ``Chunk``. + + Returns: + Callable[[~.GetChunkRequest], + Awaitable[~.Chunk]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_chunk" not in self._stubs: + self._stubs["get_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/GetChunk", + request_serializer=retriever_service.GetChunkRequest.serialize, + response_deserializer=retriever.Chunk.deserialize, + ) + return self._stubs["get_chunk"] + + @property + def update_chunk( + self, + ) -> Callable[[retriever_service.UpdateChunkRequest], Awaitable[retriever.Chunk]]: + r"""Return a callable for the update chunk method over gRPC. + + Updates a ``Chunk``. 
+ + Returns: + Callable[[~.UpdateChunkRequest], + Awaitable[~.Chunk]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_chunk" not in self._stubs: + self._stubs["update_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/UpdateChunk", + request_serializer=retriever_service.UpdateChunkRequest.serialize, + response_deserializer=retriever.Chunk.deserialize, + ) + return self._stubs["update_chunk"] + + @property + def batch_update_chunks( + self, + ) -> Callable[ + [retriever_service.BatchUpdateChunksRequest], + Awaitable[retriever_service.BatchUpdateChunksResponse], + ]: + r"""Return a callable for the batch update chunks method over gRPC. + + Batch update ``Chunk``\ s. + + Returns: + Callable[[~.BatchUpdateChunksRequest], + Awaitable[~.BatchUpdateChunksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_update_chunks" not in self._stubs: + self._stubs["batch_update_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/BatchUpdateChunks", + request_serializer=retriever_service.BatchUpdateChunksRequest.serialize, + response_deserializer=retriever_service.BatchUpdateChunksResponse.deserialize, + ) + return self._stubs["batch_update_chunks"] + + @property + def delete_chunk( + self, + ) -> Callable[[retriever_service.DeleteChunkRequest], Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete chunk method over gRPC. + + Deletes a ``Chunk``. + + Returns: + Callable[[~.DeleteChunkRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_chunk" not in self._stubs: + self._stubs["delete_chunk"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/DeleteChunk", + request_serializer=retriever_service.DeleteChunkRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_chunk"] + + @property + def batch_delete_chunks( + self, + ) -> Callable[ + [retriever_service.BatchDeleteChunksRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the batch delete chunks method over gRPC. + + Batch delete ``Chunk``\ s. + + Returns: + Callable[[~.BatchDeleteChunksRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_delete_chunks" not in self._stubs: + self._stubs["batch_delete_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/BatchDeleteChunks", + request_serializer=retriever_service.BatchDeleteChunksRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["batch_delete_chunks"] + + @property + def list_chunks( + self, + ) -> Callable[ + [retriever_service.ListChunksRequest], + Awaitable[retriever_service.ListChunksResponse], + ]: + r"""Return a callable for the list chunks method over gRPC. + + Lists all ``Chunk``\ s in a ``Document``. + + Returns: + Callable[[~.ListChunksRequest], + Awaitable[~.ListChunksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_chunks" not in self._stubs: + self._stubs["list_chunks"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.RetrieverService/ListChunks", + request_serializer=retriever_service.ListChunksRequest.serialize, + response_deserializer=retriever_service.ListChunksResponse.deserialize, + ) + return self._stubs["list_chunks"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("RetrieverServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/rest.py new file mode 100644 index 000000000000..a1e2399d50c3 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/retriever_service/transports/rest.py @@ -0,0 +1,2717 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import RetrieverServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class RetrieverServiceRestInterceptor: + """Interceptor for RetrieverService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the RetrieverServiceRestTransport. + + .. 
code-block:: python + class MyCustomRetrieverServiceInterceptor(RetrieverServiceRestInterceptor): + def pre_batch_create_chunks(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_create_chunks(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_batch_delete_chunks(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_batch_update_chunks(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_update_chunks(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_chunk(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_chunk(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_document(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_document(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_chunk(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_document(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_chunk(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_chunk(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_document(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_document(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_chunks(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_chunks(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_corpora(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_corpora(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_documents(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_documents(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_query_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_query_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_query_document(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + 
+ def post_query_document(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_chunk(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_chunk(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_document(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_document(self, response): + logging.log(f"Received response: {response}") + return response + + transport = RetrieverServiceRestTransport(interceptor=MyCustomRetrieverServiceInterceptor()) + client = RetrieverServiceClient(transport=transport) + + + """ + + def pre_batch_create_chunks( + self, + request: retriever_service.BatchCreateChunksRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.BatchCreateChunksRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_create_chunks + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_batch_create_chunks( + self, response: retriever_service.BatchCreateChunksResponse + ) -> retriever_service.BatchCreateChunksResponse: + """Post-rpc interceptor for batch_create_chunks + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_batch_delete_chunks( + self, + request: retriever_service.BatchDeleteChunksRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.BatchDeleteChunksRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_delete_chunks + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def pre_batch_update_chunks( + self, + request: retriever_service.BatchUpdateChunksRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.BatchUpdateChunksRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_update_chunks + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_batch_update_chunks( + self, response: retriever_service.BatchUpdateChunksResponse + ) -> retriever_service.BatchUpdateChunksResponse: + """Post-rpc interceptor for batch_update_chunks + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_create_chunk( + self, + request: retriever_service.CreateChunkRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.CreateChunkRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_chunk + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. 
+ """ + return request, metadata + + def post_create_chunk(self, response: retriever.Chunk) -> retriever.Chunk: + """Post-rpc interceptor for create_chunk + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_create_corpus( + self, + request: retriever_service.CreateCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.CreateCorpusRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_create_corpus(self, response: retriever.Corpus) -> retriever.Corpus: + """Post-rpc interceptor for create_corpus + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_create_document( + self, + request: retriever_service.CreateDocumentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.CreateDocumentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_document + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_create_document(self, response: retriever.Document) -> retriever.Document: + """Post-rpc interceptor for create_document + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_delete_chunk( + self, + request: retriever_service.DeleteChunkRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.DeleteChunkRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_chunk + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def pre_delete_corpus( + self, + request: retriever_service.DeleteCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.DeleteCorpusRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def pre_delete_document( + self, + request: retriever_service.DeleteDocumentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.DeleteDocumentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_document + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def pre_get_chunk( + self, + request: retriever_service.GetChunkRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.GetChunkRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_chunk + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. 
+ """ + return request, metadata + + def post_get_chunk(self, response: retriever.Chunk) -> retriever.Chunk: + """Post-rpc interceptor for get_chunk + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_get_corpus( + self, + request: retriever_service.GetCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.GetCorpusRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_get_corpus(self, response: retriever.Corpus) -> retriever.Corpus: + """Post-rpc interceptor for get_corpus + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_get_document( + self, + request: retriever_service.GetDocumentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.GetDocumentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_document + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_get_document(self, response: retriever.Document) -> retriever.Document: + """Post-rpc interceptor for get_document + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_list_chunks( + self, + request: retriever_service.ListChunksRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.ListChunksRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_chunks + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_list_chunks( + self, response: retriever_service.ListChunksResponse + ) -> retriever_service.ListChunksResponse: + """Post-rpc interceptor for list_chunks + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_list_corpora( + self, + request: retriever_service.ListCorporaRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.ListCorporaRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_corpora + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_list_corpora( + self, response: retriever_service.ListCorporaResponse + ) -> retriever_service.ListCorporaResponse: + """Post-rpc interceptor for list_corpora + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_list_documents( + self, + request: retriever_service.ListDocumentsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.ListDocumentsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_documents + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. 
+ """ + return request, metadata + + def post_list_documents( + self, response: retriever_service.ListDocumentsResponse + ) -> retriever_service.ListDocumentsResponse: + """Post-rpc interceptor for list_documents + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_query_corpus( + self, + request: retriever_service.QueryCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.QueryCorpusRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for query_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_query_corpus( + self, response: retriever_service.QueryCorpusResponse + ) -> retriever_service.QueryCorpusResponse: + """Post-rpc interceptor for query_corpus + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_query_document( + self, + request: retriever_service.QueryDocumentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.QueryDocumentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for query_document + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_query_document( + self, response: retriever_service.QueryDocumentResponse + ) -> retriever_service.QueryDocumentResponse: + """Post-rpc interceptor for query_document + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_update_chunk( + self, + request: retriever_service.UpdateChunkRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.UpdateChunkRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_chunk + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_update_chunk(self, response: retriever.Chunk) -> retriever.Chunk: + """Post-rpc interceptor for update_chunk + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + def pre_update_corpus( + self, + request: retriever_service.UpdateCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.UpdateCorpusRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_update_corpus(self, response: retriever.Corpus) -> retriever.Corpus: + """Post-rpc interceptor for update_corpus + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. 
+ """ + return response + + def pre_update_document( + self, + request: retriever_service.UpdateDocumentRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[retriever_service.UpdateDocumentRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_document + + Override in a subclass to manipulate the request or metadata + before they are sent to the RetrieverService server. + """ + return request, metadata + + def post_update_document(self, response: retriever.Document) -> retriever.Document: + """Post-rpc interceptor for update_document + + Override in a subclass to manipulate the response + after it is returned by the RetrieverService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class RetrieverServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: RetrieverServiceRestInterceptor + + +class RetrieverServiceRestTransport(RetrieverServiceTransport): + """REST backend transport for RetrieverService. + + An API for semantic search over a corpus of user uploaded + content. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[RetrieverServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or RetrieverServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _BatchCreateChunks(RetrieverServiceRestStub): + def __hash__(self): + return hash("BatchCreateChunks") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.BatchCreateChunksRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.BatchCreateChunksResponse: + r"""Call the batch create chunks method over HTTP. + + Args: + request (~.retriever_service.BatchCreateChunksRequest): + The request object. Request to batch create ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.BatchCreateChunksResponse: + Response from ``BatchCreateChunks`` containing a list of + created ``Chunk``\ s. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=corpora/*/documents/*}/chunks:batchCreate", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_create_chunks( + request, metadata + ) + pb_request = retriever_service.BatchCreateChunksRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass.
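# Illustrative caller-side sketch (hypothetical request object ``req``): because
# from_http_response() maps HTTP status codes onto google.api_core exception
# subclasses, user code can treat REST and gRPC failures uniformly, e.g.:
#
#     import logging
#     from google.api_core import exceptions
#
#     try:
#         response = client.batch_create_chunks(request=req)
#     except exceptions.GoogleAPICallError as e:
#         logging.warning("BatchCreateChunks failed: %s", e)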
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.BatchCreateChunksResponse() + pb_resp = retriever_service.BatchCreateChunksResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_create_chunks(resp) + return resp + + class _BatchDeleteChunks(RetrieverServiceRestStub): + def __hash__(self): + return hash("BatchDeleteChunks") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.BatchDeleteChunksRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the batch delete chunks method over HTTP. + + Args: + request (~.retriever_service.BatchDeleteChunksRequest): + The request object. Request to batch delete ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=corpora/*/documents/*}/chunks:batchDelete", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_delete_chunks( + request, metadata + ) + pb_request = retriever_service.BatchDeleteChunksRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _BatchUpdateChunks(RetrieverServiceRestStub): + def __hash__(self): + return hash("BatchUpdateChunks") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.BatchUpdateChunksRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.BatchUpdateChunksResponse: + r"""Call the batch update chunks method over HTTP. 
+ + Args: + request (~.retriever_service.BatchUpdateChunksRequest): + The request object. Request to batch update ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.BatchUpdateChunksResponse: + Response from ``BatchUpdateChunks`` containing a list of + updated ``Chunk``\ s. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=corpora/*/documents/*}/chunks:batchUpdate", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_update_chunks( + request, metadata + ) + pb_request = retriever_service.BatchUpdateChunksRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.BatchUpdateChunksResponse() + pb_resp = retriever_service.BatchUpdateChunksResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_update_chunks(resp) + return resp + + class _CreateChunk(RetrieverServiceRestStub): + def __hash__(self): + return hash("CreateChunk") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.CreateChunkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Call the create chunk method over HTTP. + + Args: + request (~.retriever_service.CreateChunkRequest): + The request object. Request to create a ``Chunk``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Chunk: + A ``Chunk`` is a subpart of a ``Document`` that is + treated as an independent unit for the purposes of + vector representation and storage. A ``Corpus`` can have + a maximum of 1 million ``Chunk``\ s. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=corpora/*/documents/*}/chunks", + "body": "chunk", + }, + ] + request, metadata = self._interceptor.pre_create_chunk(request, metadata) + pb_request = retriever_service.CreateChunkRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Chunk() + pb_resp = retriever.Chunk.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_chunk(resp) + return resp + + class _CreateCorpus(RetrieverServiceRestStub): + def __hash__(self): + return hash("CreateCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.CreateCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Call the create corpus method over HTTP. + + Args: + request (~.retriever_service.CreateCorpusRequest): + The request object. Request to create a ``Corpus``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Corpus: + A ``Corpus`` is a collection of ``Document``\ s. A + project can create up to 5 corpora. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/corpora", + "body": "corpus", + }, + ] + request, metadata = self._interceptor.pre_create_corpus(request, metadata) + pb_request = retriever_service.CreateCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Corpus() + pb_resp = retriever.Corpus.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_corpus(resp) + return resp + + class _CreateDocument(RetrieverServiceRestStub): + def __hash__(self): + return hash("CreateDocument") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.CreateDocumentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Call the create document method over HTTP. + + Args: + request (~.retriever_service.CreateDocumentRequest): + The request object. Request to create a ``Document``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Document: + A ``Document`` is a collection of ``Chunk``\ s. A + ``Corpus`` can have a maximum of 10,000 ``Document``\ s. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{parent=corpora/*}/documents", + "body": "document", + }, + ] + request, metadata = self._interceptor.pre_create_document(request, metadata) + pb_request = retriever_service.CreateDocumentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Document() + pb_resp = retriever.Document.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_document(resp) + return resp + + class _DeleteChunk(RetrieverServiceRestStub): + def __hash__(self): + return hash("DeleteChunk") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.DeleteChunkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete chunk method over HTTP. + + Args: + request (~.retriever_service.DeleteChunkRequest): + The request object. Request to delete a ``Chunk``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta/{name=corpora/*/documents/*/chunks/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_chunk(request, metadata) + pb_request = retriever_service.DeleteChunkRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteCorpus(RetrieverServiceRestStub): + def __hash__(self): + return hash("DeleteCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.DeleteCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete corpus method over HTTP. + + Args: + request (~.retriever_service.DeleteCorpusRequest): + The request object. Request to delete a ``Corpus``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta/{name=corpora/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_corpus(request, metadata) + pb_request = retriever_service.DeleteCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteDocument(RetrieverServiceRestStub): + def __hash__(self): + return hash("DeleteDocument") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.DeleteDocumentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete document method over HTTP. + + Args: + request (~.retriever_service.DeleteDocumentRequest): + The request object. Request to delete a ``Document``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta/{name=corpora/*/documents/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_document(request, metadata) + pb_request = retriever_service.DeleteDocumentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetChunk(RetrieverServiceRestStub): + def __hash__(self): + return hash("GetChunk") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.GetChunkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Call the get chunk method over HTTP. + + Args: + request (~.retriever_service.GetChunkRequest): + The request object. Request for getting information about a specific + ``Chunk``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Chunk: + A ``Chunk`` is a subpart of a ``Document`` that is + treated as an independent unit for the purposes of + vector representation and storage. 
A ``Corpus`` can have + a maximum of 1 million ``Chunk``\ s. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=corpora/*/documents/*/chunks/*}", + }, + ] + request, metadata = self._interceptor.pre_get_chunk(request, metadata) + pb_request = retriever_service.GetChunkRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Chunk() + pb_resp = retriever.Chunk.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_chunk(resp) + return resp + + class _GetCorpus(RetrieverServiceRestStub): + def __hash__(self): + return hash("GetCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.GetCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Call the get corpus method over HTTP. + + Args: + request (~.retriever_service.GetCorpusRequest): + The request object. Request for getting information about a specific + ``Corpus``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Corpus: + A ``Corpus`` is a collection of ``Document``\ s. A + project can create up to 5 corpora. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=corpora/*}", + }, + ] + request, metadata = self._interceptor.pre_get_corpus(request, metadata) + pb_request = retriever_service.GetCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Corpus() + pb_resp = retriever.Corpus.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_corpus(resp) + return resp + + class _GetDocument(RetrieverServiceRestStub): + def __hash__(self): + return hash("GetDocument") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.GetDocumentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Call the get document method over HTTP. + + Args: + request (~.retriever_service.GetDocumentRequest): + The request object. Request for getting information about a specific + ``Document``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Document: + A ``Document`` is a collection of ``Chunk``\ s. A + ``Corpus`` can have a maximum of 10,000 ``Document``\ s. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{name=corpora/*/documents/*}", + }, + ] + request, metadata = self._interceptor.pre_get_document(request, metadata) + pb_request = retriever_service.GetDocumentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Document() + pb_resp = retriever.Document.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_document(resp) + return resp + + class _ListChunks(RetrieverServiceRestStub): + def __hash__(self): + return hash("ListChunks") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.ListChunksRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.ListChunksResponse: + r"""Call the list chunks method over HTTP. + + Args: + request (~.retriever_service.ListChunksRequest): + The request object. Request for listing ``Chunk``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.ListChunksResponse: + Response from ``ListChunks`` containing a paginated list + of ``Chunk``\ s. The ``Chunk``\ s are sorted by + ascending ``chunk.create_time``. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{parent=corpora/*/documents/*}/chunks", + }, + ] + request, metadata = self._interceptor.pre_list_chunks(request, metadata) + pb_request = retriever_service.ListChunksRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.ListChunksResponse() + pb_resp = retriever_service.ListChunksResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_chunks(resp) + return resp + + class _ListCorpora(RetrieverServiceRestStub): + def __hash__(self): + return hash("ListCorpora") + + def __call__( + self, + request: retriever_service.ListCorporaRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.ListCorporaResponse: + r"""Call the list corpora method over HTTP. + + Args: + request (~.retriever_service.ListCorporaRequest): + The request object. Request for listing ``Corpora``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.ListCorporaResponse: + Response from ``ListCorpora`` containing a paginated + list of ``Corpora``. The results are sorted by ascending + ``corpus.create_time``. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/corpora", + }, + ] + request, metadata = self._interceptor.pre_list_corpora(request, metadata) + pb_request = retriever_service.ListCorporaRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.ListCorporaResponse() + pb_resp = retriever_service.ListCorporaResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_corpora(resp) + return resp + + class _ListDocuments(RetrieverServiceRestStub): + def __hash__(self): + return hash("ListDocuments") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.ListDocumentsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.ListDocumentsResponse: + r"""Call the list documents method over HTTP. + + Args: + request (~.retriever_service.ListDocumentsRequest): + The request object. Request for listing ``Document``\ s. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.ListDocumentsResponse: + Response from ``ListDocuments`` containing a paginated + list of ``Document``\ s. The ``Document``\ s are sorted + by ascending ``document.create_time``. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta/{parent=corpora/*}/documents", + }, + ] + request, metadata = self._interceptor.pre_list_documents(request, metadata) + pb_request = retriever_service.ListDocumentsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.ListDocumentsResponse() + pb_resp = retriever_service.ListDocumentsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_documents(resp) + return resp + + class _QueryCorpus(RetrieverServiceRestStub): + def __hash__(self): + return hash("QueryCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.QueryCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.QueryCorpusResponse: + r"""Call the query corpus method over HTTP. + + Args: + request (~.retriever_service.QueryCorpusRequest): + The request object. Request for querying a ``Corpus``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.QueryCorpusResponse: + Response from ``QueryCorpus`` containing a list of + relevant chunks. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{name=corpora/*}:query", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_query_corpus(request, metadata) + pb_request = retriever_service.QueryCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.QueryCorpusResponse() + pb_resp = retriever_service.QueryCorpusResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_query_corpus(resp) + return resp + + class _QueryDocument(RetrieverServiceRestStub): + def __hash__(self): + return hash("QueryDocument") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.QueryDocumentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever_service.QueryDocumentResponse: + r"""Call the query document method over HTTP. + + Args: + request (~.retriever_service.QueryDocumentRequest): + The request object. Request for querying a ``Document``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever_service.QueryDocumentResponse: + Response from ``QueryDocument`` containing a list of + relevant chunks. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{name=corpora/*/documents/*}:query", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_query_document(request, metadata) + pb_request = retriever_service.QueryDocumentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever_service.QueryDocumentResponse() + pb_resp = retriever_service.QueryDocumentResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_query_document(resp) + return resp + + class _UpdateChunk(RetrieverServiceRestStub): + def __hash__(self): + return hash("UpdateChunk") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.UpdateChunkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Chunk: + r"""Call the update chunk method over HTTP. + + Args: + request (~.retriever_service.UpdateChunkRequest): + The request object. Request to update a ``Chunk``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Chunk: + A ``Chunk`` is a subpart of a ``Document`` that is + treated as an independent unit for the purposes of + vector representation and storage. A ``Corpus`` can have + a maximum of 1 million ``Chunk``\ s. 
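Several stubs declare `__REQUIRED_FIELDS_DEFAULT_VALUES` (for example `{"updateMask": {}}` on `_UpdateChunk` above), and `_get_unset_required_fields` merges those defaults into the transcoded query parameters so required query fields are always present. The merge itself is a plain dictionary fill-in; a small stand-alone sketch of the behaviour:

```python
# Defaults mirroring _UpdateChunk.__REQUIRED_FIELDS_DEFAULT_VALUES.
REQUIRED_FIELDS_DEFAULT_VALUES = {"updateMask": {}}


def get_unset_required_fields(message_dict):
    # Keep only the defaults whose keys the caller did not provide.
    return {
        k: v
        for k, v in REQUIRED_FIELDS_DEFAULT_VALUES.items()
        if k not in message_dict
    }


query_params = {"foo": "bar"}  # caller omitted updateMask
query_params.update(get_unset_required_fields(query_params))
print(query_params)  # {'foo': 'bar', 'updateMask': {}}

query_params = {"updateMask": "display_name"}  # caller set it explicitly
query_params.update(get_unset_required_fields(query_params))
print(query_params)  # unchanged: {'updateMask': 'display_name'}
```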
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta/{chunk.name=corpora/*/documents/*/chunks/*}", + "body": "chunk", + }, + ] + request, metadata = self._interceptor.pre_update_chunk(request, metadata) + pb_request = retriever_service.UpdateChunkRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Chunk() + pb_resp = retriever.Chunk.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_chunk(resp) + return resp + + class _UpdateCorpus(RetrieverServiceRestStub): + def __hash__(self): + return hash("UpdateCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.UpdateCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Corpus: + r"""Call the update corpus method over HTTP. + + Args: + request (~.retriever_service.UpdateCorpusRequest): + The request object. Request to update a ``Corpus``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Corpus: + A ``Corpus`` is a collection of ``Document``\ s. A + project can create up to 5 corpora. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta/{corpus.name=corpora/*}", + "body": "corpus", + }, + ] + request, metadata = self._interceptor.pre_update_corpus(request, metadata) + pb_request = retriever_service.UpdateCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Corpus() + pb_resp = retriever.Corpus.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_corpus(resp) + return resp + + class _UpdateDocument(RetrieverServiceRestStub): + def __hash__(self): + return hash("UpdateDocument") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: retriever_service.UpdateDocumentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> retriever.Document: + r"""Call the update document method over HTTP. + + Args: + request (~.retriever_service.UpdateDocumentRequest): + The request object. Request to update a ``Document``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.retriever.Document: + A ``Document`` is a collection of ``Chunk``\ s. A + ``Corpus`` can have a maximum of 10,000 ``Document``\ s. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta/{document.name=corpora/*/documents/*}", + "body": "document", + }, + ] + request, metadata = self._interceptor.pre_update_document(request, metadata) + pb_request = retriever_service.UpdateDocumentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = retriever.Document() + pb_resp = retriever.Document.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_document(resp) + return resp + + @property + def batch_create_chunks( + self, + ) -> Callable[ + [retriever_service.BatchCreateChunksRequest], + retriever_service.BatchCreateChunksResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchCreateChunks(self._session, self._host, self._interceptor) # type: ignore + + @property + def batch_delete_chunks( + self, + ) -> Callable[[retriever_service.BatchDeleteChunksRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchDeleteChunks(self._session, self._host, self._interceptor) # type: ignore + + @property + def batch_update_chunks( + self, + ) -> Callable[ + [retriever_service.BatchUpdateChunksRequest], + retriever_service.BatchUpdateChunksResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchUpdateChunks(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_chunk( + self, + ) -> Callable[[retriever_service.CreateChunkRequest], retriever.Chunk]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateChunk(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_corpus( + self, + ) -> Callable[[retriever_service.CreateCorpusRequest], retriever.Corpus]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
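Every stub routes its request through `self._interceptor.pre_<rpc>` and its response through `post_<rpc>`, which is the hook point for logging or request rewriting. A rough sketch of plugging in a custom interceptor; the `RetrieverServiceRestInterceptor` base class, the transport's `interceptor=` constructor argument, the import path, and the `documents` field on the response are all assumptions here, since they are defined outside this hunk:

```python
import logging

# Assumed import path for the interceptor and transport defined in this module.
from google.ai.generativelanguage_v1beta.services.retriever_service.transports.rest import (
    RetrieverServiceRestInterceptor,
    RetrieverServiceRestTransport,
)


class LoggingInterceptor(RetrieverServiceRestInterceptor):
    """Logs every ListDocuments call before it is sent and after it returns."""

    def pre_list_documents(self, request, metadata):
        logging.info("ListDocuments request: %s", request)
        return request, metadata

    def post_list_documents(self, response):
        # `documents` is the assumed repeated field on ListDocumentsResponse.
        logging.info("ListDocuments returned %d document(s)", len(response.documents))
        return response


# Assumed wiring: the REST transport accepts an `interceptor` argument.
transport = RetrieverServiceRestTransport(interceptor=LoggingInterceptor())
```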
+ # In C++ this would require a dynamic_cast + return self._CreateCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_document( + self, + ) -> Callable[[retriever_service.CreateDocumentRequest], retriever.Document]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateDocument(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_chunk( + self, + ) -> Callable[[retriever_service.DeleteChunkRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteChunk(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_corpus( + self, + ) -> Callable[[retriever_service.DeleteCorpusRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_document( + self, + ) -> Callable[[retriever_service.DeleteDocumentRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteDocument(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_chunk( + self, + ) -> Callable[[retriever_service.GetChunkRequest], retriever.Chunk]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetChunk(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_corpus( + self, + ) -> Callable[[retriever_service.GetCorpusRequest], retriever.Corpus]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_document( + self, + ) -> Callable[[retriever_service.GetDocumentRequest], retriever.Document]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetDocument(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_chunks( + self, + ) -> Callable[ + [retriever_service.ListChunksRequest], retriever_service.ListChunksResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListChunks(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_corpora( + self, + ) -> Callable[ + [retriever_service.ListCorporaRequest], retriever_service.ListCorporaResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListCorpora(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_documents( + self, + ) -> Callable[ + [retriever_service.ListDocumentsRequest], + retriever_service.ListDocumentsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDocuments(self._session, self._host, self._interceptor) # type: ignore + + @property + def query_corpus( + self, + ) -> Callable[ + [retriever_service.QueryCorpusRequest], retriever_service.QueryCorpusResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._QueryCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def query_document( + self, + ) -> Callable[ + [retriever_service.QueryDocumentRequest], + retriever_service.QueryDocumentResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._QueryDocument(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_chunk( + self, + ) -> Callable[[retriever_service.UpdateChunkRequest], retriever.Chunk]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateChunk(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_corpus( + self, + ) -> Callable[[retriever_service.UpdateCorpusRequest], retriever.Corpus]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_document( + self, + ) -> Callable[[retriever_service.UpdateDocumentRequest], retriever.Document]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateDocument(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("RetrieverServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/__init__.py new file mode 100644 index 000000000000..f705e582e7a1 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
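Each REST `__call__` above converts any HTTP status >= 400 into a typed exception via `core_exceptions.from_http_response`, so callers deal with `GoogleAPICallError` subclasses rather than raw responses. A hedged sketch of handling that on the caller side; the `RetrieverServiceClient` wrapper, its `transport="rest"` option, and the flattened `name` argument are assumptions not shown in this hunk:

```python
from google.api_core import exceptions as core_exceptions
from google.ai import generativelanguage_v1beta as glm

# Assumed client wrapper around RetrieverServiceRestTransport.
client = glm.RetrieverServiceClient(transport="rest")

try:
    corpus = client.get_corpus(name="corpora/does-not-exist")  # illustrative name
except core_exceptions.NotFound:
    # Raised when the server answers 404; from_http_response picks the subclass.
    corpus = None
except core_exceptions.GoogleAPICallError:
    # Any other 4xx/5xx surfaces as a GoogleAPICallError subclass.
    raise
```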
+# +from .async_client import TextServiceAsyncClient +from .client import TextServiceClient + +__all__ = ( + "TextServiceClient", + "TextServiceAsyncClient", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/async_client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/async_client.py new file mode 100644 index 000000000000..ecc68e1f50ab --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/async_client.py @@ -0,0 +1,835 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import safety, text_service + +from .client import TextServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, TextServiceTransport +from .transports.grpc_asyncio import TextServiceGrpcAsyncIOTransport + + +class TextServiceAsyncClient: + """API for using Generative Language Models (GLMs) trained to + generate text. + Also known as Large Language Models (LLM)s, these generate text + given an input prompt from the user. 
+ """ + + _client: TextServiceClient + + DEFAULT_ENDPOINT = TextServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = TextServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(TextServiceClient.model_path) + parse_model_path = staticmethod(TextServiceClient.parse_model_path) + common_billing_account_path = staticmethod( + TextServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + TextServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(TextServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(TextServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(TextServiceClient.common_organization_path) + parse_common_organization_path = staticmethod( + TextServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(TextServiceClient.common_project_path) + parse_common_project_path = staticmethod( + TextServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(TextServiceClient.common_location_path) + parse_common_location_path = staticmethod( + TextServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TextServiceAsyncClient: The constructed client. + """ + return TextServiceClient.from_service_account_info.__func__(TextServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TextServiceAsyncClient: The constructed client. + """ + return TextServiceClient.from_service_account_file.__func__(TextServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
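The `from_service_account_info` and `from_service_account_file` factories above delegate to the synchronous client's constructors, and `from_service_account_json` is an alias for `from_service_account_file`. A short usage sketch; the key file path is hypothetical:

```python
import json

from google.ai import generativelanguage_v1beta as glm

# Hypothetical key file path, for illustration only.
client = glm.TextServiceAsyncClient.from_service_account_file("sa-key.json")

# Equivalent: the alias and the info-dict variant.
client = glm.TextServiceAsyncClient.from_service_account_json("sa-key.json")

with open("sa-key.json") as f:
    info = json.load(f)
client = glm.TextServiceAsyncClient.from_service_account_info(info)
```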
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return TextServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> TextServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TextServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(TextServiceClient).get_transport_class, type(TextServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TextServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the text service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.TextServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = TextServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def generate_text( + self, + request: Optional[Union[text_service.GenerateTextRequest, dict]] = None, + *, + model: Optional[str] = None, + prompt: Optional[text_service.TextPrompt] = None, + temperature: Optional[float] = None, + candidate_count: Optional[int] = None, + max_output_tokens: Optional[int] = None, + top_p: Optional[float] = None, + top_k: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.GenerateTextResponse: + r"""Generates a response from the model given an input + message. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_generate_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.GenerateTextRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.generate_text(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.GenerateTextRequest, dict]]): + The request object. Request to generate a text completion + response from the model. + model (:class:`str`): + Required. The name of the ``Model`` or ``TunedModel`` to + use for generating the completion. Examples: + models/text-bison-001 + tunedModels/sentence-translator-u3b7m + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (:class:`google.ai.generativelanguage_v1beta.types.TextPrompt`): + Required. The free-form input text + given to the model as a prompt. + Given a prompt, the model will generate + a TextCompletion response it predicts as + the completion of the input text. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + temperature (:class:`float`): + Optional. Controls the randomness of the output. Note: + The default value varies by model, see the + ``Model.temperature`` attribute of the ``Model`` + returned the ``getModel`` function. + + Values can range from [0.0,1.0], inclusive. A value + closer to 1.0 will produce responses that are more + varied and creative, while a value closer to 0.0 will + typically result in more straightforward responses from + the model. + + This corresponds to the ``temperature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + candidate_count (:class:`int`): + Optional. Number of generated responses to return. + + This value must be between [1, 8], inclusive. If unset, + this will default to 1. + + This corresponds to the ``candidate_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + max_output_tokens (:class:`int`): + Optional. The maximum number of tokens to include in a + candidate. + + If unset, this will default to output_token_limit + specified in the ``Model`` specification. + + This corresponds to the ``max_output_tokens`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_p (:class:`float`): + Optional. The maximum cumulative probability of tokens + to consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Tokens are sorted based on their assigned probabilities + so that only the most likely tokens are considered. + Top-k sampling directly limits the maximum number of + tokens to consider, while Nucleus sampling limits number + of tokens based on the cumulative probability. + + Note: The default value varies by model, see the + ``Model.top_p`` attribute of the ``Model`` returned the + ``getModel`` function. 
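As the `generate_text` signature and the flattened-argument check that follows make explicit, the keyword arguments are a convenience layer over `GenerateTextRequest`: a caller may pass either a request object or individual fields, but not both. A short sketch of the three cases, with the model name taken from the examples above:

```python
from google.ai import generativelanguage_v1beta as glm


async def demo() -> None:
    client = glm.TextServiceAsyncClient()
    prompt = glm.TextPrompt(text="Write one sentence about corpora.")

    # 1. A fully-populated request object.
    request = glm.GenerateTextRequest(
        model="models/text-bison-001", prompt=prompt, temperature=0.2
    )
    response = await client.generate_text(request=request)

    # 2. Flattened keyword arguments, no request object.
    response = await client.generate_text(
        model="models/text-bison-001", prompt=prompt, temperature=0.2
    )

    # 3. Mixing the two raises ValueError before any RPC is sent.
    try:
        await client.generate_text(request=request, model="models/text-bison-001")
    except ValueError:
        pass

    print(response)
```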
+ + This corresponds to the ``top_p`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_k (:class:`int`): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most + probable tokens. Defaults to 40. + + Note: The default value varies by model, see the + ``Model.top_k`` attribute of the ``Model`` returned the + ``getModel`` function. + + This corresponds to the ``top_k`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateTextResponse: + The response from the model, + including candidate completions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [ + model, + prompt, + temperature, + candidate_count, + max_output_tokens, + top_p, + top_k, + ] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = text_service.GenerateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + if temperature is not None: + request.temperature = temperature + if candidate_count is not None: + request.candidate_count = candidate_count + if max_output_tokens is not None: + request.max_output_tokens = max_output_tokens + if top_p is not None: + request.top_p = top_p + if top_k is not None: + request.top_k = top_k + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_text, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def embed_text( + self, + request: Optional[Union[text_service.EmbedTextRequest, dict]] = None, + *, + model: Optional[str] = None, + text: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.EmbedTextResponse: + r"""Generates an embedding from the model given an input + message. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedTextRequest( + model="model_value", + ) + + # Make the request + response = await client.embed_text(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.EmbedTextRequest, dict]]): + The request object. Request to get a text embedding from + the model. + model (:class:`str`): + Required. The model name to use with + the format model=models/{model}. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + text (:class:`str`): + Optional. The free-form input text + that the model will turn into an + embedding. + + This corresponds to the ``text`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.EmbedTextResponse: + The response to a EmbedTextRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, text]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = text_service.EmbedTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if text is not None: + request.text = text + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.embed_text, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def batch_embed_text( + self, + request: Optional[Union[text_service.BatchEmbedTextRequest, dict]] = None, + *, + model: Optional[str] = None, + texts: Optional[MutableSequence[str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.BatchEmbedTextResponse: + r"""Generates multiple embeddings from the model given + input text in a synchronous call. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_batch_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.BatchEmbedTextRequest( + model="model_value", + ) + + # Make the request + response = await client.batch_embed_text(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.BatchEmbedTextRequest, dict]]): + The request object. Batch request to get a text embedding + from the model. + model (:class:`str`): + Required. The name of the ``Model`` to use for + generating the embedding. Examples: + models/embedding-gecko-001 + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + texts (:class:`MutableSequence[str]`): + Optional. The free-form input texts + that the model will turn into an + embedding. The current limit is 100 + texts, over which an error will be + thrown. + + This corresponds to the ``texts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchEmbedTextResponse: + The response to a EmbedTextRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, texts]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = text_service.BatchEmbedTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if texts: + request.texts.extend(texts) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
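Each RPC in this client is wrapped with a default `AsyncRetry`: exponential backoff from 1.0s up to 10.0s with multiplier 1.3, retrying only `ServiceUnavailable`, and a 60s overall deadline. Callers can override that policy per call through the `retry` and `timeout` parameters; a hedged sketch (the model name is the example from the docstring above, the texts are made up):

```python
from google.ai import generativelanguage_v1beta as glm
from google.api_core import exceptions as core_exceptions
from google.api_core import retry_async


async def embed_with_custom_retry():
    client = glm.TextServiceAsyncClient()

    # A tighter policy than the generated default: give up after 20 seconds overall.
    quick_retry = retry_async.AsyncRetry(
        initial=0.5,
        maximum=5.0,
        multiplier=1.3,
        predicate=retry_async.if_exception_type(core_exceptions.ServiceUnavailable),
        deadline=20.0,
    )

    return await client.batch_embed_text(
        model="models/embedding-gecko-001",
        texts=["first text", "second text"],
        retry=quick_retry,
        timeout=20.0,
    )
```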
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_embed_text, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def count_text_tokens( + self, + request: Optional[Union[text_service.CountTextTokensRequest, dict]] = None, + *, + model: Optional[str] = None, + prompt: Optional[text_service.TextPrompt] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.CountTextTokensResponse: + r"""Runs a model's tokenizer on a text and returns the + token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + async def sample_count_text_tokens(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.CountTextTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.count_text_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.ai.generativelanguage_v1beta.types.CountTextTokensRequest, dict]]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (:class:`str`): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (:class:`google.ai.generativelanguage_v1beta.types.TextPrompt`): + Required. The free-form input text + given to the model as a prompt. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.CountTextTokensResponse: + A response from CountTextTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, prompt]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = text_service.CountTextTokensRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.count_text_tokens, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "TextServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("TextServiceAsyncClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/client.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/client.py new file mode 100644 index 000000000000..bcdf9bb33a6d --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/client.py @@ -0,0 +1,1030 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import os
+import re
+from typing import (
+    Dict,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.ai.generativelanguage_v1beta import gapic_version as package_version
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.longrunning import operations_pb2  # type: ignore
+
+from google.ai.generativelanguage_v1beta.types import safety, text_service
+
+from .transports.base import DEFAULT_CLIENT_INFO, TextServiceTransport
+from .transports.grpc import TextServiceGrpcTransport
+from .transports.grpc_asyncio import TextServiceGrpcAsyncIOTransport
+from .transports.rest import TextServiceRestTransport
+
+
+class TextServiceClientMeta(type):
+    """Metaclass for the TextService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[TextServiceTransport]]
+    _transport_registry["grpc"] = TextServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = TextServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = TextServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[TextServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class TextServiceClient(metaclass=TextServiceClientMeta):
+    """API for using Generative Language Models (GLMs) trained to
+    generate text.
+    Also known as Large Language Models (LLM)s, these generate text
+    given an input prompt from the user.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "generativelanguage.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TextServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TextServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TextServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TextServiceTransport: The transport used by the client + instance. 
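The metaclass registry and the `_get_default_mtls_endpoint` helper shown above are small pure functions, so their behaviour can be checked directly; a quick sketch:

```python
from google.ai import generativelanguage_v1beta as glm

client_cls = glm.TextServiceClient

# The mTLS endpoint is derived from the default endpoint at class-definition time.
print(client_cls.DEFAULT_ENDPOINT)       # generativelanguage.googleapis.com
print(client_cls.DEFAULT_MTLS_ENDPOINT)  # generativelanguage.mtls.googleapis.com
print(client_cls._get_default_mtls_endpoint("generativelanguage.sandbox.googleapis.com"))
# -> generativelanguage.mtls.sandbox.googleapis.com

# The metaclass keeps a transport registry; "grpc" is registered first, so it is the default.
print(client_cls.get_transport_class())        # TextServiceGrpcTransport
print(client_cls.get_transport_class("rest"))  # TextServiceRestTransport
```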
+        """
+        return self._transport
+
+    @staticmethod
+    def model_path(
+        model: str,
+    ) -> str:
+        """Returns a fully-qualified model string."""
+        return "models/{model}".format(
+            model=model,
+        )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str, str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` if provided, use the provided one.
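The path helpers above are the usual GAPIC resource-name round-trip: the builders format a name, and the parsers recover its segments with a named-group regex, returning `{}` when the name does not match. For example (project and location values are illustrative):

```python
from google.ai import generativelanguage_v1beta as glm

print(glm.TextServiceClient.model_path("text-bison-001"))
# models/text-bison-001

print(glm.TextServiceClient.parse_model_path("models/text-bison-001"))
# {'model': 'text-bison-001'}

print(glm.TextServiceClient.common_location_path("my-project", "us-central1"))
# projects/my-project/locations/us-central1

# A name that does not match the pattern parses to an empty dict.
print(glm.TextServiceClient.parse_model_path("tunedModels/sentence-translator-u3b7m"))
# {}
```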
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, TextServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the text service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TextServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
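`get_mtls_endpoint_and_cert_source` resolves the endpoint purely from `ClientOptions` and the two environment variables described above, so it can be exercised without creating a client. A sketch (the custom endpoint is a made-up example, and the printed values are the expected results of the logic above):

```python
import os

from google.api_core.client_options import ClientOptions
from google.ai import generativelanguage_v1beta as glm

os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"

# No client certificate and "auto" mode: fall back to the regular endpoint.
endpoint, cert_source = glm.TextServiceClient.get_mtls_endpoint_and_cert_source(ClientOptions())
print(endpoint)     # generativelanguage.googleapis.com
print(cert_source)  # None

# An explicit api_endpoint always takes precedence.
endpoint, _ = glm.TextServiceClient.get_mtls_endpoint_and_cert_source(
    ClientOptions(api_endpoint="generativelanguage.example.private")
)
print(endpoint)     # generativelanguage.example.private

# "always" forces the mTLS endpoint even without a certificate.
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "always"
endpoint, _ = glm.TextServiceClient.get_mtls_endpoint_and_cert_source(ClientOptions())
print(endpoint)     # generativelanguage.mtls.googleapis.com
```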
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TextServiceTransport): + # transport is a TextServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def generate_text( + self, + request: Optional[Union[text_service.GenerateTextRequest, dict]] = None, + *, + model: Optional[str] = None, + prompt: Optional[text_service.TextPrompt] = None, + temperature: Optional[float] = None, + candidate_count: Optional[int] = None, + max_output_tokens: Optional[int] = None, + top_p: Optional[float] = None, + top_k: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.GenerateTextResponse: + r"""Generates a response from the model given an input + message. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_generate_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.GenerateTextRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.generate_text(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.GenerateTextRequest, dict]): + The request object. Request to generate a text completion + response from the model. + model (str): + Required. The name of the ``Model`` or ``TunedModel`` to + use for generating the completion. Examples: + models/text-bison-001 + tunedModels/sentence-translator-u3b7m + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (google.ai.generativelanguage_v1beta.types.TextPrompt): + Required. The free-form input text + given to the model as a prompt. + Given a prompt, the model will generate + a TextCompletion response it predicts as + the completion of the input text. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + temperature (float): + Optional. Controls the randomness of the output. Note: + The default value varies by model, see the + ``Model.temperature`` attribute of the ``Model`` + returned the ``getModel`` function. + + Values can range from [0.0,1.0], inclusive. A value + closer to 1.0 will produce responses that are more + varied and creative, while a value closer to 0.0 will + typically result in more straightforward responses from + the model. + + This corresponds to the ``temperature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + candidate_count (int): + Optional. Number of generated responses to return. + + This value must be between [1, 8], inclusive. If unset, + this will default to 1. + + This corresponds to the ``candidate_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + max_output_tokens (int): + Optional. The maximum number of tokens to include in a + candidate. + + If unset, this will default to output_token_limit + specified in the ``Model`` specification. + + This corresponds to the ``max_output_tokens`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_p (float): + Optional. The maximum cumulative probability of tokens + to consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Tokens are sorted based on their assigned probabilities + so that only the most likely tokens are considered. + Top-k sampling directly limits the maximum number of + tokens to consider, while Nucleus sampling limits number + of tokens based on the cumulative probability. + + Note: The default value varies by model, see the + ``Model.top_p`` attribute of the ``Model`` returned the + ``getModel`` function. + + This corresponds to the ``top_p`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + top_k (int): + Optional. 
The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most + probable tokens. Defaults to 40. + + Note: The default value varies by model, see the + ``Model.top_k`` attribute of the ``Model`` returned the + ``getModel`` function. + + This corresponds to the ``top_k`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.GenerateTextResponse: + The response from the model, + including candidate completions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [ + model, + prompt, + temperature, + candidate_count, + max_output_tokens, + top_p, + top_k, + ] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a text_service.GenerateTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, text_service.GenerateTextRequest): + request = text_service.GenerateTextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + if temperature is not None: + request.temperature = temperature + if candidate_count is not None: + request.candidate_count = candidate_count + if max_output_tokens is not None: + request.max_output_tokens = max_output_tokens + if top_p is not None: + request.top_p = top_p + if top_k is not None: + request.top_k = top_k + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_text] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def embed_text( + self, + request: Optional[Union[text_service.EmbedTextRequest, dict]] = None, + *, + model: Optional[str] = None, + text: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.EmbedTextResponse: + r"""Generates an embedding from the model given an input + message. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedTextRequest( + model="model_value", + ) + + # Make the request + response = client.embed_text(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.EmbedTextRequest, dict]): + The request object. Request to get a text embedding from + the model. + model (str): + Required. The model name to use with + the format model=models/{model}. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + text (str): + Optional. The free-form input text + that the model will turn into an + embedding. + + This corresponds to the ``text`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.EmbedTextResponse: + The response to a EmbedTextRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, text]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a text_service.EmbedTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, text_service.EmbedTextRequest): + request = text_service.EmbedTextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if text is not None: + request.text = text + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.embed_text] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_embed_text( + self, + request: Optional[Union[text_service.BatchEmbedTextRequest, dict]] = None, + *, + model: Optional[str] = None, + texts: Optional[MutableSequence[str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.BatchEmbedTextResponse: + r"""Generates multiple embeddings from the model given + input text in a synchronous call. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_batch_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.BatchEmbedTextRequest( + model="model_value", + ) + + # Make the request + response = client.batch_embed_text(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.BatchEmbedTextRequest, dict]): + The request object. Batch request to get a text embedding + from the model. + model (str): + Required. The name of the ``Model`` to use for + generating the embedding. Examples: + models/embedding-gecko-001 + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + texts (MutableSequence[str]): + Optional. The free-form input texts + that the model will turn into an + embedding. The current limit is 100 + texts, over which an error will be + thrown. + + This corresponds to the ``texts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.BatchEmbedTextResponse: + The response to a EmbedTextRequest. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, texts]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a text_service.BatchEmbedTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, text_service.BatchEmbedTextRequest): + request = text_service.BatchEmbedTextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if texts is not None: + request.texts = texts + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_embed_text] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def count_text_tokens( + self, + request: Optional[Union[text_service.CountTextTokensRequest, dict]] = None, + *, + model: Optional[str] = None, + prompt: Optional[text_service.TextPrompt] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.CountTextTokensResponse: + r"""Runs a model's tokenizer on a text and returns the + token count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.ai import generativelanguage_v1beta + + def sample_count_text_tokens(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.CountTextTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.count_text_tokens(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.ai.generativelanguage_v1beta.types.CountTextTokensRequest, dict]): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + model (str): + Required. The model's resource name. This serves as an + ID for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + prompt (google.ai.generativelanguage_v1beta.types.TextPrompt): + Required. The free-form input text + given to the model as a prompt. + + This corresponds to the ``prompt`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.ai.generativelanguage_v1beta.types.CountTextTokensResponse: + A response from CountTextTokens. + + It returns the model's token_count for the prompt. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, prompt]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a text_service.CountTextTokensRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
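+        # Illustrative note (not generated code): because of this coercion, a
+        # caller may pass the request as a proto message, as a plain dict, or
+        # as flattened keyword arguments. Assuming a constructed ``client``,
+        # these sketches are equivalent:
+        #
+        #   client.count_text_tokens(
+        #       request={"model": "models/text-bison-001", "prompt": {"text": "Hi"}}
+        #   )
+        #   client.count_text_tokens(
+        #       model="models/text-bison-001",
+        #       prompt=text_service.TextPrompt(text="Hi"),
+        #   )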
+ if not isinstance(request, text_service.CountTextTokensRequest): + request = text_service.CountTextTokensRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if prompt is not None: + request.prompt = prompt + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.count_text_tokens] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "TextServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("TextServiceClient",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/__init__.py new file mode 100644 index 000000000000..63721cb6cb66 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TextServiceTransport +from .grpc import TextServiceGrpcTransport +from .grpc_asyncio import TextServiceGrpcAsyncIOTransport +from .rest import TextServiceRestInterceptor, TextServiceRestTransport + +# Compile a registry of transports. 
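+#
+# Illustrative note (not generated code): the client looks transports up in
+# this registry by name, for example:
+#
+#   transport_cls = TextServiceClient.get_transport_class("rest")
+#   # -> TextServiceRestTransport; "grpc" and "grpc_asyncio" resolve likewise.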
+_transport_registry = OrderedDict() # type: Dict[str, Type[TextServiceTransport]] +_transport_registry["grpc"] = TextServiceGrpcTransport +_transport_registry["grpc_asyncio"] = TextServiceGrpcAsyncIOTransport +_transport_registry["rest"] = TextServiceRestTransport + +__all__ = ( + "TextServiceTransport", + "TextServiceGrpcTransport", + "TextServiceGrpcAsyncIOTransport", + "TextServiceRestTransport", + "TextServiceRestInterceptor", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/base.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/base.py new file mode 100644 index 000000000000..710f1899c143 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/base.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.ai.generativelanguage_v1beta import gapic_version as package_version +from google.ai.generativelanguage_v1beta.types import text_service + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class TextServiceTransport(abc.ABC): + """Abstract transport class for TextService.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "generativelanguage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.generate_text: gapic_v1.method.wrap_method( + self.generate_text, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.embed_text: gapic_v1.method.wrap_method( + self.embed_text, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.batch_embed_text: gapic_v1.method.wrap_method( + self.batch_embed_text, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.count_text_tokens: gapic_v1.method.wrap_method( + self.count_text_tokens, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
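+
+        An illustrative sketch: prefer the client's context-manager form,
+        which calls this method on exit::
+
+            with TextServiceClient() as client:
+                ...  # requests made here; the transport is closed afterwards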
+ """ + raise NotImplementedError() + + @property + def generate_text( + self, + ) -> Callable[ + [text_service.GenerateTextRequest], + Union[ + text_service.GenerateTextResponse, + Awaitable[text_service.GenerateTextResponse], + ], + ]: + raise NotImplementedError() + + @property + def embed_text( + self, + ) -> Callable[ + [text_service.EmbedTextRequest], + Union[ + text_service.EmbedTextResponse, Awaitable[text_service.EmbedTextResponse] + ], + ]: + raise NotImplementedError() + + @property + def batch_embed_text( + self, + ) -> Callable[ + [text_service.BatchEmbedTextRequest], + Union[ + text_service.BatchEmbedTextResponse, + Awaitable[text_service.BatchEmbedTextResponse], + ], + ]: + raise NotImplementedError() + + @property + def count_text_tokens( + self, + ) -> Callable[ + [text_service.CountTextTokensRequest], + Union[ + text_service.CountTextTokensResponse, + Awaitable[text_service.CountTextTokensResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("TextServiceTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc.py new file mode 100644 index 000000000000..fd714eca100e --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc.py @@ -0,0 +1,357 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore + +from google.ai.generativelanguage_v1beta.types import text_service + +from .base import DEFAULT_CLIENT_INFO, TextServiceTransport + + +class TextServiceGrpcTransport(TextServiceTransport): + """gRPC backend transport for TextService. + + API for using Generative Language Models (GLMs) trained to + generate text. + Also known as Large Language Models (LLM)s, these generate text + given an input prompt from the user. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def generate_text( + self, + ) -> Callable[ + [text_service.GenerateTextRequest], text_service.GenerateTextResponse + ]: + r"""Return a callable for the generate text method over gRPC. + + Generates a response from the model given an input + message. + + Returns: + Callable[[~.GenerateTextRequest], + ~.GenerateTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_text" not in self._stubs: + self._stubs["generate_text"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/GenerateText", + request_serializer=text_service.GenerateTextRequest.serialize, + response_deserializer=text_service.GenerateTextResponse.deserialize, + ) + return self._stubs["generate_text"] + + @property + def embed_text( + self, + ) -> Callable[[text_service.EmbedTextRequest], text_service.EmbedTextResponse]: + r"""Return a callable for the embed text method over gRPC. + + Generates an embedding from the model given an input + message. + + Returns: + Callable[[~.EmbedTextRequest], + ~.EmbedTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "embed_text" not in self._stubs: + self._stubs["embed_text"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/EmbedText", + request_serializer=text_service.EmbedTextRequest.serialize, + response_deserializer=text_service.EmbedTextResponse.deserialize, + ) + return self._stubs["embed_text"] + + @property + def batch_embed_text( + self, + ) -> Callable[ + [text_service.BatchEmbedTextRequest], text_service.BatchEmbedTextResponse + ]: + r"""Return a callable for the batch embed text method over gRPC. + + Generates multiple embeddings from the model given + input text in a synchronous call. + + Returns: + Callable[[~.BatchEmbedTextRequest], + ~.BatchEmbedTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_embed_text" not in self._stubs: + self._stubs["batch_embed_text"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/BatchEmbedText", + request_serializer=text_service.BatchEmbedTextRequest.serialize, + response_deserializer=text_service.BatchEmbedTextResponse.deserialize, + ) + return self._stubs["batch_embed_text"] + + @property + def count_text_tokens( + self, + ) -> Callable[ + [text_service.CountTextTokensRequest], text_service.CountTextTokensResponse + ]: + r"""Return a callable for the count text tokens method over gRPC. 
+ + Runs a model's tokenizer on a text and returns the + token count. + + Returns: + Callable[[~.CountTextTokensRequest], + ~.CountTextTokensResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "count_text_tokens" not in self._stubs: + self._stubs["count_text_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/CountTextTokens", + request_serializer=text_service.CountTextTokensRequest.serialize, + response_deserializer=text_service.CountTextTokensResponse.deserialize, + ) + return self._stubs["count_text_tokens"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("TextServiceGrpcTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc_asyncio.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc_asyncio.py new file mode 100644 index 000000000000..bb9ad8b7c2b6 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/grpc_asyncio.py @@ -0,0 +1,360 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.ai.generativelanguage_v1beta.types import text_service + +from .base import DEFAULT_CLIENT_INFO, TextServiceTransport +from .grpc import TextServiceGrpcTransport + + +class TextServiceGrpcAsyncIOTransport(TextServiceTransport): + """gRPC AsyncIO backend transport for TextService. + + API for using Generative Language Models (GLMs) trained to + generate text. + Also known as Large Language Models (LLM)s, these generate text + given an input prompt from the user. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
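+
+    A minimal usage sketch (illustrative only; assumes a running event loop,
+    default credentials, and ``TextServiceAsyncClient``, the async counterpart
+    generated alongside the client above):
+
+    .. code-block:: python
+
+        from google.ai import generativelanguage_v1beta
+
+        async def token_count(text: str) -> int:
+            client = generativelanguage_v1beta.TextServiceAsyncClient()
+            response = await client.count_text_tokens(
+                model="models/text-bison-001",
+                prompt=generativelanguage_v1beta.TextPrompt(text=text),
+            )
+            return response.token_count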
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def generate_text( + self, + ) -> Callable[ + [text_service.GenerateTextRequest], Awaitable[text_service.GenerateTextResponse] + ]: + r"""Return a callable for the generate text method over gRPC. + + Generates a response from the model given an input + message. + + Returns: + Callable[[~.GenerateTextRequest], + Awaitable[~.GenerateTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_text" not in self._stubs: + self._stubs["generate_text"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/GenerateText", + request_serializer=text_service.GenerateTextRequest.serialize, + response_deserializer=text_service.GenerateTextResponse.deserialize, + ) + return self._stubs["generate_text"] + + @property + def embed_text( + self, + ) -> Callable[ + [text_service.EmbedTextRequest], Awaitable[text_service.EmbedTextResponse] + ]: + r"""Return a callable for the embed text method over gRPC. + + Generates an embedding from the model given an input + message. + + Returns: + Callable[[~.EmbedTextRequest], + Awaitable[~.EmbedTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "embed_text" not in self._stubs: + self._stubs["embed_text"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/EmbedText", + request_serializer=text_service.EmbedTextRequest.serialize, + response_deserializer=text_service.EmbedTextResponse.deserialize, + ) + return self._stubs["embed_text"] + + @property + def batch_embed_text( + self, + ) -> Callable[ + [text_service.BatchEmbedTextRequest], + Awaitable[text_service.BatchEmbedTextResponse], + ]: + r"""Return a callable for the batch embed text method over gRPC. + + Generates multiple embeddings from the model given + input text in a synchronous call. + + Returns: + Callable[[~.BatchEmbedTextRequest], + Awaitable[~.BatchEmbedTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_embed_text" not in self._stubs: + self._stubs["batch_embed_text"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/BatchEmbedText", + request_serializer=text_service.BatchEmbedTextRequest.serialize, + response_deserializer=text_service.BatchEmbedTextResponse.deserialize, + ) + return self._stubs["batch_embed_text"] + + @property + def count_text_tokens( + self, + ) -> Callable[ + [text_service.CountTextTokensRequest], + Awaitable[text_service.CountTextTokensResponse], + ]: + r"""Return a callable for the count text tokens method over gRPC. + + Runs a model's tokenizer on a text and returns the + token count. + + Returns: + Callable[[~.CountTextTokensRequest], + Awaitable[~.CountTextTokensResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "count_text_tokens" not in self._stubs: + self._stubs["count_text_tokens"] = self.grpc_channel.unary_unary( + "/google.ai.generativelanguage.v1beta.TextService/CountTextTokens", + request_serializer=text_service.CountTextTokensRequest.serialize, + response_deserializer=text_service.CountTextTokensResponse.deserialize, + ) + return self._stubs["count_text_tokens"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("TextServiceGrpcAsyncIOTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/rest.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/rest.py new file mode 100644 index 000000000000..c105ca95dc41 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/services/text_service/transports/rest.py @@ -0,0 +1,746 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import dataclasses +import json # type: ignore +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.protobuf import json_format +import grpc # type: ignore +from requests import __version__ as requests_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.longrunning import operations_pb2 # type: ignore + +from google.ai.generativelanguage_v1beta.types import text_service + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO +from .base import TextServiceTransport + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class TextServiceRestInterceptor: + """Interceptor for TextService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the TextServiceRestTransport. + + .. code-block:: python + class MyCustomTextServiceInterceptor(TextServiceRestInterceptor): + def pre_batch_embed_text(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_embed_text(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_count_text_tokens(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_count_text_tokens(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_embed_text(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_embed_text(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_text(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_text(self, response): + logging.log(f"Received response: {response}") + return response + + transport = TextServiceRestTransport(interceptor=MyCustomTextServiceInterceptor()) + client = TextServiceClient(transport=transport) + + + """ + + def pre_batch_embed_text( + self, + request: text_service.BatchEmbedTextRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[text_service.BatchEmbedTextRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_embed_text + + Override in a subclass to manipulate the request or metadata + before they are sent to the TextService server. 
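A small sketch of a concrete interceptor built on the hooks above: the pre-RPC hook tags outgoing requests with extra metadata and the post-RPC hook logs the result. The header name is illustrative.

.. code-block:: python

    import logging

    class LoggingTextServiceInterceptor(TextServiceRestInterceptor):
        def pre_generate_text(self, request, metadata):
            # metadata is a sequence of (key, value) tuples sent with the request.
            metadata = list(metadata) + [("x-client-tag", "example")]  # illustrative header
            return request, metadata

        def post_generate_text(self, response):
            logging.info("GenerateText returned %d candidate(s)", len(response.candidates))
            return response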
+ """ + return request, metadata + + def post_batch_embed_text( + self, response: text_service.BatchEmbedTextResponse + ) -> text_service.BatchEmbedTextResponse: + """Post-rpc interceptor for batch_embed_text + + Override in a subclass to manipulate the response + after it is returned by the TextService server but before + it is returned to user code. + """ + return response + + def pre_count_text_tokens( + self, + request: text_service.CountTextTokensRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[text_service.CountTextTokensRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for count_text_tokens + + Override in a subclass to manipulate the request or metadata + before they are sent to the TextService server. + """ + return request, metadata + + def post_count_text_tokens( + self, response: text_service.CountTextTokensResponse + ) -> text_service.CountTextTokensResponse: + """Post-rpc interceptor for count_text_tokens + + Override in a subclass to manipulate the response + after it is returned by the TextService server but before + it is returned to user code. + """ + return response + + def pre_embed_text( + self, + request: text_service.EmbedTextRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[text_service.EmbedTextRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for embed_text + + Override in a subclass to manipulate the request or metadata + before they are sent to the TextService server. + """ + return request, metadata + + def post_embed_text( + self, response: text_service.EmbedTextResponse + ) -> text_service.EmbedTextResponse: + """Post-rpc interceptor for embed_text + + Override in a subclass to manipulate the response + after it is returned by the TextService server but before + it is returned to user code. + """ + return response + + def pre_generate_text( + self, + request: text_service.GenerateTextRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[text_service.GenerateTextRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_text + + Override in a subclass to manipulate the request or metadata + before they are sent to the TextService server. + """ + return request, metadata + + def post_generate_text( + self, response: text_service.GenerateTextResponse + ) -> text_service.GenerateTextResponse: + """Post-rpc interceptor for generate_text + + Override in a subclass to manipulate the response + after it is returned by the TextService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class TextServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: TextServiceRestInterceptor + + +class TextServiceRestTransport(TextServiceTransport): + """REST backend transport for TextService. + + API for using Generative Language Models (GLMs) trained to + generate text. + Also known as Large Language Models (LLM)s, these generate text + given an input prompt from the user. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "generativelanguage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[TextServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
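A hedged sketch of pointing the REST transport at a plain-HTTP test server via ``url_scheme``, as the docstring above allows. The local address and the use of anonymous credentials are assumptions for testing only.

.. code-block:: python

    from google.ai.generativelanguage_v1beta.services.text_service.transports.rest import (
        TextServiceRestTransport,
    )
    from google.auth.credentials import AnonymousCredentials

    transport = TextServiceRestTransport(
        host="localhost:8080",          # illustrative local test server
        url_scheme="http",
        credentials=AnonymousCredentials(),
    )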
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or TextServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _BatchEmbedText(TextServiceRestStub): + def __hash__(self): + return hash("BatchEmbedText") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: text_service.BatchEmbedTextRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.BatchEmbedTextResponse: + r"""Call the batch embed text method over HTTP. + + Args: + request (~.text_service.BatchEmbedTextRequest): + The request object. Batch request to get a text embedding + from the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.text_service.BatchEmbedTextResponse: + The response to a EmbedTextRequest. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:batchEmbedText", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_embed_text( + request, metadata + ) + pb_request = text_service.BatchEmbedTextRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
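The scheme handling in the constructor above relies on the named groups ``scheme`` and ``host`` of the hostname regex (read back via ``url_match_items["scheme"]``). A standalone sketch of the same normalization:

.. code-block:: python

    import re

    def normalize_host(host: str, url_scheme: str = "https") -> str:
        # Same pattern as the constructor: capture an optional scheme prefix
        # via the named groups ``scheme`` and ``host``.
        match = re.match(r"^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if match is None:
            raise ValueError(f"Unexpected hostname structure: {host}")
        return host if match.groupdict()["scheme"] else f"{url_scheme}://{host}"

    print(normalize_host("generativelanguage.googleapis.com"))
    # https://generativelanguage.googleapis.com
    print(normalize_host("http://localhost:8080"))
    # http://localhost:8080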
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = text_service.BatchEmbedTextResponse() + pb_resp = text_service.BatchEmbedTextResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_embed_text(resp) + return resp + + class _CountTextTokens(TextServiceRestStub): + def __hash__(self): + return hash("CountTextTokens") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: text_service.CountTextTokensRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.CountTextTokensResponse: + r"""Call the count text tokens method over HTTP. + + Args: + request (~.text_service.CountTextTokensRequest): + The request object. Counts the number of tokens in the ``prompt`` sent to a + model. + + Models may tokenize text differently, so each model may + return a different ``token_count``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.text_service.CountTextTokensResponse: + A response from ``CountTextTokens``. + + It returns the model's ``token_count`` for the + ``prompt``. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:countTextTokens", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_count_text_tokens( + request, metadata + ) + pb_request = text_service.CountTextTokensRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
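``_get_unset_required_fields`` simply fills in defaults for required query parameters the caller left unset. A self-contained illustration of the pattern; the field names and defaults are illustrative, since this service declares none.

.. code-block:: python

    REQUIRED_FIELDS_DEFAULT_VALUES = {"pageSize": 0, "languageCode": "en-US"}

    def get_unset_required_fields(message_dict):
        # Only defaults for fields absent from the serialized request are returned.
        return {
            k: v
            for k, v in REQUIRED_FIELDS_DEFAULT_VALUES.items()
            if k not in message_dict
        }

    print(get_unset_required_fields({"pageSize": 10}))
    # {'languageCode': 'en-US'}
    print(get_unset_required_fields({}))
    # {'pageSize': 0, 'languageCode': 'en-US'}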
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = text_service.CountTextTokensResponse() + pb_resp = text_service.CountTextTokensResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_count_text_tokens(resp) + return resp + + class _EmbedText(TextServiceRestStub): + def __hash__(self): + return hash("EmbedText") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: text_service.EmbedTextRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.EmbedTextResponse: + r"""Call the embed text method over HTTP. + + Args: + request (~.text_service.EmbedTextRequest): + The request object. Request to get a text embedding from + the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.text_service.EmbedTextResponse: + The response to a EmbedTextRequest. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:embedText", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_embed_text(request, metadata) + pb_request = text_service.EmbedTextRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
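Because non-2xx responses are converted with ``core_exceptions.from_http_response``, callers see REST failures through the same ``google.api_core`` exception hierarchy used for gRPC. A hedged sketch, assuming ``client`` is an already-constructed ``TextServiceClient``:

.. code-block:: python

    from google.api_core import exceptions as core_exceptions

    from google.ai.generativelanguage_v1beta.types import EmbedTextRequest

    try:
        response = client.embed_text(  # ``client`` is an assumed TextServiceClient
            request=EmbedTextRequest(model="models/embedding-gecko-001", text="hello")
        )
    except core_exceptions.GoogleAPICallError as exc:
        print(f"EmbedText failed with status {exc.code}: {exc.message}")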
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = text_service.EmbedTextResponse() + pb_resp = text_service.EmbedTextResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_embed_text(resp) + return resp + + class _GenerateText(TextServiceRestStub): + def __hash__(self): + return hash("GenerateText") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: text_service.GenerateTextRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> text_service.GenerateTextResponse: + r"""Call the generate text method over HTTP. + + Args: + request (~.text_service.GenerateTextRequest): + The request object. Request to generate a text completion + response from the model. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.text_service.GenerateTextResponse: + The response from the model, + including candidate completions. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta/{model=models/*}:generateText", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta/{model=tunedModels/*}:generateText", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_generate_text(request, metadata) + pb_request = text_service.GenerateTextRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = text_service.GenerateTextResponse() + pb_resp = text_service.GenerateTextResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_text(resp) + return resp + + @property + def batch_embed_text( + self, + ) -> Callable[ + [text_service.BatchEmbedTextRequest], text_service.BatchEmbedTextResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
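A rough sketch of the URI selection that ``path_template.transcode`` performs for the two ``generateText`` bindings above; the model names are illustrative.

.. code-block:: python

    from google.api_core import path_template

    http_options = [
        {"method": "post", "uri": "/v1beta/{model=models/*}:generateText", "body": "*"},
        {"method": "post", "uri": "/v1beta/{model=tunedModels/*}:generateText", "body": "*"},
    ]

    # A base model matches the first binding...
    print(path_template.transcode(http_options, model="models/text-bison-001")["uri"])
    # /v1beta/models/text-bison-001:generateText

    # ...while a tuned model falls through to the second.
    print(path_template.transcode(http_options, model="tunedModels/my-copy")["uri"])
    # /v1beta/tunedModels/my-copy:generateText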
+ # In C++ this would require a dynamic_cast + return self._BatchEmbedText(self._session, self._host, self._interceptor) # type: ignore + + @property + def count_text_tokens( + self, + ) -> Callable[ + [text_service.CountTextTokensRequest], text_service.CountTextTokensResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CountTextTokens(self._session, self._host, self._interceptor) # type: ignore + + @property + def embed_text( + self, + ) -> Callable[[text_service.EmbedTextRequest], text_service.EmbedTextResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._EmbedText(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_text( + self, + ) -> Callable[ + [text_service.GenerateTextRequest], text_service.GenerateTextResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateText(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("TextServiceRestTransport",) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/__init__.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/__init__.py new file mode 100644 index 000000000000..a040a0d8eb4d --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/__init__.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .citation import CitationMetadata, CitationSource +from .content import ( + Blob, + Content, + FunctionCall, + FunctionDeclaration, + FunctionResponse, + GroundingPassage, + GroundingPassages, + Part, + Schema, + Tool, + Type, +) +from .discuss_service import ( + CountMessageTokensRequest, + CountMessageTokensResponse, + Example, + GenerateMessageRequest, + GenerateMessageResponse, + Message, + MessagePrompt, +) +from .generative_service import ( + AttributionSourceId, + BatchEmbedContentsRequest, + BatchEmbedContentsResponse, + Candidate, + ContentEmbedding, + CountTokensRequest, + CountTokensResponse, + EmbedContentRequest, + EmbedContentResponse, + GenerateAnswerRequest, + GenerateAnswerResponse, + GenerateContentRequest, + GenerateContentResponse, + GenerationConfig, + GroundingAttribution, + SemanticRetrieverConfig, + TaskType, +) +from .model import Model +from .model_service import ( + CreateTunedModelMetadata, + CreateTunedModelRequest, + DeleteTunedModelRequest, + GetModelRequest, + GetTunedModelRequest, + ListModelsRequest, + ListModelsResponse, + ListTunedModelsRequest, + ListTunedModelsResponse, + UpdateTunedModelRequest, +) +from .permission import Permission +from .permission_service import ( + CreatePermissionRequest, + DeletePermissionRequest, + GetPermissionRequest, + ListPermissionsRequest, + ListPermissionsResponse, + TransferOwnershipRequest, + TransferOwnershipResponse, + UpdatePermissionRequest, +) +from .retriever import ( + Chunk, + ChunkData, + Condition, + Corpus, + CustomMetadata, + Document, + MetadataFilter, + StringList, +) +from .retriever_service import ( + BatchCreateChunksRequest, + BatchCreateChunksResponse, + BatchDeleteChunksRequest, + BatchUpdateChunksRequest, + BatchUpdateChunksResponse, + CreateChunkRequest, + CreateCorpusRequest, + CreateDocumentRequest, + DeleteChunkRequest, + DeleteCorpusRequest, + DeleteDocumentRequest, + GetChunkRequest, + GetCorpusRequest, + GetDocumentRequest, + ListChunksRequest, + ListChunksResponse, + ListCorporaRequest, + ListCorporaResponse, + ListDocumentsRequest, + ListDocumentsResponse, + QueryCorpusRequest, + QueryCorpusResponse, + QueryDocumentRequest, + QueryDocumentResponse, + RelevantChunk, + UpdateChunkRequest, + UpdateCorpusRequest, + UpdateDocumentRequest, +) +from .safety import ( + ContentFilter, + HarmCategory, + SafetyFeedback, + SafetyRating, + SafetySetting, +) +from .text_service import ( + BatchEmbedTextRequest, + BatchEmbedTextResponse, + CountTextTokensRequest, + CountTextTokensResponse, + Embedding, + EmbedTextRequest, + EmbedTextResponse, + GenerateTextRequest, + GenerateTextResponse, + TextCompletion, + TextPrompt, +) +from .tuned_model import ( + Dataset, + Hyperparameters, + TunedModel, + TunedModelSource, + TuningExample, + TuningExamples, + TuningSnapshot, + TuningTask, +) + +__all__ = ( + "CitationMetadata", + "CitationSource", + "Blob", + "Content", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", + "GroundingPassage", + "GroundingPassages", + "Part", + "Schema", + "Tool", + "Type", + "CountMessageTokensRequest", + "CountMessageTokensResponse", + "Example", + "GenerateMessageRequest", + "GenerateMessageResponse", + "Message", + "MessagePrompt", + "AttributionSourceId", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "Candidate", + "ContentEmbedding", + "CountTokensRequest", + "CountTokensResponse", + "EmbedContentRequest", + "EmbedContentResponse", + "GenerateAnswerRequest", + "GenerateAnswerResponse", + "GenerateContentRequest", + 
"GenerateContentResponse", + "GenerationConfig", + "GroundingAttribution", + "SemanticRetrieverConfig", + "TaskType", + "Model", + "CreateTunedModelMetadata", + "CreateTunedModelRequest", + "DeleteTunedModelRequest", + "GetModelRequest", + "GetTunedModelRequest", + "ListModelsRequest", + "ListModelsResponse", + "ListTunedModelsRequest", + "ListTunedModelsResponse", + "UpdateTunedModelRequest", + "Permission", + "CreatePermissionRequest", + "DeletePermissionRequest", + "GetPermissionRequest", + "ListPermissionsRequest", + "ListPermissionsResponse", + "TransferOwnershipRequest", + "TransferOwnershipResponse", + "UpdatePermissionRequest", + "Chunk", + "ChunkData", + "Condition", + "Corpus", + "CustomMetadata", + "Document", + "MetadataFilter", + "StringList", + "BatchCreateChunksRequest", + "BatchCreateChunksResponse", + "BatchDeleteChunksRequest", + "BatchUpdateChunksRequest", + "BatchUpdateChunksResponse", + "CreateChunkRequest", + "CreateCorpusRequest", + "CreateDocumentRequest", + "DeleteChunkRequest", + "DeleteCorpusRequest", + "DeleteDocumentRequest", + "GetChunkRequest", + "GetCorpusRequest", + "GetDocumentRequest", + "ListChunksRequest", + "ListChunksResponse", + "ListCorporaRequest", + "ListCorporaResponse", + "ListDocumentsRequest", + "ListDocumentsResponse", + "QueryCorpusRequest", + "QueryCorpusResponse", + "QueryDocumentRequest", + "QueryDocumentResponse", + "RelevantChunk", + "UpdateChunkRequest", + "UpdateCorpusRequest", + "UpdateDocumentRequest", + "ContentFilter", + "SafetyFeedback", + "SafetyRating", + "SafetySetting", + "HarmCategory", + "BatchEmbedTextRequest", + "BatchEmbedTextResponse", + "CountTextTokensRequest", + "CountTextTokensResponse", + "Embedding", + "EmbedTextRequest", + "EmbedTextResponse", + "GenerateTextRequest", + "GenerateTextResponse", + "TextCompletion", + "TextPrompt", + "Dataset", + "Hyperparameters", + "TunedModel", + "TunedModelSource", + "TuningExample", + "TuningExamples", + "TuningSnapshot", + "TuningTask", +) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/citation.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/citation.py new file mode 100644 index 000000000000..cbeb43c1edbd --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/citation.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "CitationMetadata", + "CitationSource", + }, +) + + +class CitationMetadata(proto.Message): + r"""A collection of source attributions for a piece of content. + + Attributes: + citation_sources (MutableSequence[google.ai.generativelanguage_v1beta.types.CitationSource]): + Citations to sources for a specific response. 
+ """ + + citation_sources: MutableSequence["CitationSource"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="CitationSource", + ) + + +class CitationSource(proto.Message): + r"""A citation to a source for a portion of a specific response. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + start_index (int): + Optional. Start of segment of the response + that is attributed to this source. + + Index indicates the start of the segment, + measured in bytes. + + This field is a member of `oneof`_ ``_start_index``. + end_index (int): + Optional. End of the attributed segment, + exclusive. + + This field is a member of `oneof`_ ``_end_index``. + uri (str): + Optional. URI that is attributed as a source + for a portion of the text. + + This field is a member of `oneof`_ ``_uri``. + license_ (str): + Optional. License for the GitHub project that + is attributed as a source for segment. + + License info is required for code citations. + + This field is a member of `oneof`_ ``_license``. + """ + + start_index: int = proto.Field( + proto.INT32, + number=1, + optional=True, + ) + end_index: int = proto.Field( + proto.INT32, + number=2, + optional=True, + ) + uri: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + license_: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/content.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/content.py new file mode 100644 index 000000000000..b89637b999f8 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/content.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import struct_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "Type", + "Content", + "Part", + "Blob", + "Tool", + "FunctionDeclaration", + "FunctionCall", + "FunctionResponse", + "Schema", + "GroundingPassage", + "GroundingPassages", + }, +) + + +class Type(proto.Enum): + r"""Type contains the list of OpenAPI data types as defined by + https://spec.openapis.org/oas/v3.0.3#data-types + + Values: + TYPE_UNSPECIFIED (0): + Not specified, should not be used. + STRING (1): + String type. + NUMBER (2): + Number type. + INTEGER (3): + Integer type. + BOOLEAN (4): + Boolean type. + ARRAY (5): + Array type. + OBJECT (6): + Object type. + """ + TYPE_UNSPECIFIED = 0 + STRING = 1 + NUMBER = 2 + INTEGER = 3 + BOOLEAN = 4 + ARRAY = 5 + OBJECT = 6 + + +class Content(proto.Message): + r"""The base structured datatype containing multi-part content of a + message. 
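A small sketch of populating a ``CitationSource``. Note the trailing underscore on ``license_`` (the underlying proto field is named ``license``); the values are illustrative.

.. code-block:: python

    from google.ai.generativelanguage_v1beta.types import CitationMetadata, CitationSource

    source = CitationSource(
        start_index=0,
        end_index=120,
        uri="https://example.com/source-article",
        license_="mit",
    )
    metadata = CitationMetadata(citation_sources=[source])
    print(metadata)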
+ + A ``Content`` includes a ``role`` field designating the producer of + the ``Content`` and a ``parts`` field containing multi-part data + that contains the content of the message turn. + + Attributes: + parts (MutableSequence[google.ai.generativelanguage_v1beta.types.Part]): + Ordered ``Parts`` that constitute a single message. Parts + may have different MIME types. + role (str): + Optional. The producer of the content. Must + be either 'user' or 'model'. + Useful to set for multi-turn conversations, + otherwise can be left blank or unset. + """ + + parts: MutableSequence["Part"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Part", + ) + role: str = proto.Field( + proto.STRING, + number=2, + ) + + +class Part(proto.Message): + r"""A datatype containing media that is part of a multi-part ``Content`` + message. + + A ``Part`` consists of data which has an associated datatype. A + ``Part`` can only contain one of the accepted types in + ``Part.data``. + + A ``Part`` must have a fixed IANA MIME type identifying the type and + subtype of the media if the ``inline_data`` field is filled with raw + bytes. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text (str): + Inline text. + + This field is a member of `oneof`_ ``data``. + inline_data (google.ai.generativelanguage_v1beta.types.Blob): + Inline media bytes. + + This field is a member of `oneof`_ ``data``. + function_call (google.ai.generativelanguage_v1beta.types.FunctionCall): + A predicted ``FunctionCall`` returned from the model that + contains a string representing the + ``FunctionDeclaration.name`` with the arguments and their + values. + + This field is a member of `oneof`_ ``data``. + function_response (google.ai.generativelanguage_v1beta.types.FunctionResponse): + The result output of a ``FunctionCall`` that contains a + string representing the ``FunctionDeclaration.name`` and a + structured JSON object containing any output from the + function is used as context to the model. + + This field is a member of `oneof`_ ``data``. + """ + + text: str = proto.Field( + proto.STRING, + number=2, + oneof="data", + ) + inline_data: "Blob" = proto.Field( + proto.MESSAGE, + number=3, + oneof="data", + message="Blob", + ) + function_call: "FunctionCall" = proto.Field( + proto.MESSAGE, + number=4, + oneof="data", + message="FunctionCall", + ) + function_response: "FunctionResponse" = proto.Field( + proto.MESSAGE, + number=5, + oneof="data", + message="FunctionResponse", + ) + + +class Blob(proto.Message): + r"""Raw media bytes. + + Text should not be sent as raw bytes, use the 'text' field. + + Attributes: + mime_type (str): + The IANA standard MIME type of the source + data. Accepted types include: "image/png", + "image/jpeg", "image/heic", "image/heif", + "image/webp". + data (bytes): + Raw bytes for media formats. + """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + ) + data: bytes = proto.Field( + proto.BYTES, + number=2, + ) + + +class Tool(proto.Message): + r"""Tool details that the model may use to generate response. + + A ``Tool`` is a piece of code that enables the system to interact + with external systems to perform an action, or set of actions, + outside of knowledge and scope of the model. 
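A hedged sketch of a multi-part user turn combining the ``text`` and ``inline_data`` members of the ``data`` oneof; the image bytes are placeholders.

.. code-block:: python

    from google.ai.generativelanguage_v1beta.types import Blob, Content, Part

    user_turn = Content(
        role="user",
        parts=[
            Part(text="What is shown in this picture?"),
            Part(inline_data=Blob(mime_type="image/png", data=b"\x89PNG...")),  # placeholder bytes
        ],
    )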
+ + Attributes: + function_declarations (MutableSequence[google.ai.generativelanguage_v1beta.types.FunctionDeclaration]): + Optional. A list of ``FunctionDeclarations`` available to + the model that can be used for function calling. + + The model or system does not execute the function. Instead + the defined function may be returned as a + [FunctionCall][content.part.function_call] with arguments to + the client side for execution. The model may decide to call + a subset of these functions by populating + [FunctionCall][content.part.function_call] in the response. + The next conversation turn may contain a + [FunctionResponse][content.part.function_response] with the + [conent.role] "function" generation context for the next + model turn. + """ + + function_declarations: MutableSequence["FunctionDeclaration"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="FunctionDeclaration", + ) + + +class FunctionDeclaration(proto.Message): + r"""Structured representation of a function declaration as defined by + the `OpenAPI 3.03 + specification `__. Included in + this declaration are the function name and parameters. This + FunctionDeclaration is a representation of a block of code that can + be used as a ``Tool`` by the model and executed by the client. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. The name of the function. + Must be a-z, A-Z, 0-9, or contain underscores + and dashes, with a maximum length of 63. + description (str): + Required. A brief description of the + function. + parameters (google.ai.generativelanguage_v1beta.types.Schema): + Optional. Describes the parameters to this + function. Reflects the Open API 3.03 Parameter + Object string Key: the name of the parameter. + Parameter names are case sensitive. Schema + Value: the Schema defining the type used for the + parameter. + + This field is a member of `oneof`_ ``_parameters``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + parameters: "Schema" = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message="Schema", + ) + + +class FunctionCall(proto.Message): + r"""A predicted ``FunctionCall`` returned from the model that contains a + string representing the ``FunctionDeclaration.name`` with the + arguments and their values. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. The name of the function to call. + Must be a-z, A-Z, 0-9, or contain underscores + and dashes, with a maximum length of 63. + args (google.protobuf.struct_pb2.Struct): + Optional. The function parameters and values + in JSON object format. + + This field is a member of `oneof`_ ``_args``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + args: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=struct_pb2.Struct, + ) + + +class FunctionResponse(proto.Message): + r"""The result output from a ``FunctionCall`` that contains a string + representing the ``FunctionDeclaration.name`` and a structured JSON + object containing any output from the function is used as context to + the model. This should contain the result of a\ ``FunctionCall`` + made based on model prediction. + + Attributes: + name (str): + Required. The name of the function to call. 
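A sketch of declaring one function as a ``Tool``, using the OpenAPI-style ``Schema`` and ``Type`` defined earlier in this file; the function name and parameters are illustrative.

.. code-block:: python

    from google.ai.generativelanguage_v1beta.types import (
        FunctionDeclaration,
        Schema,
        Tool,
        Type,
    )

    get_weather = FunctionDeclaration(
        name="get_current_weather",
        description="Returns the current weather for a given city.",
        parameters=Schema(
            type_=Type.OBJECT,
            properties={"city": Schema(type_=Type.STRING)},
            required=["city"],
        ),
    )

    weather_tool = Tool(function_declarations=[get_weather])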
+ Must be a-z, A-Z, 0-9, or contain underscores + and dashes, with a maximum length of 63. + response (google.protobuf.struct_pb2.Struct): + Required. The function response in JSON + object format. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + response: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Struct, + ) + + +class Schema(proto.Message): + r"""The ``Schema`` object allows the definition of input and output data + types. These types can be objects, but also primitives and arrays. + Represents a select subset of an `OpenAPI 3.0 schema + object `__. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + type_ (google.ai.generativelanguage_v1beta.types.Type): + Optional. Data type. + format_ (str): + Optional. The format of the data. This is + used obnly for primative datatypes. Supported + formats: + + for NUMBER type: float, double + for INTEGER type: int32, int64 + description (str): + Optional. A brief description of the + parameter. This could contain examples of use. + Parameter description may be formatted as + Markdown. + nullable (bool): + Optional. Indicates if the value may be null. + enum (MutableSequence[str]): + Optional. Possible values of the element of Type.STRING with + enum format. For example we can define an Enum Direction as + : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", + "WEST"]} + items (google.ai.generativelanguage_v1beta.types.Schema): + Optional. Schema of the elements of + Type.ARRAY. + + This field is a member of `oneof`_ ``_items``. + properties (MutableMapping[str, google.ai.generativelanguage_v1beta.types.Schema]): + Optional. Properties of Type.OBJECT. + required (MutableSequence[str]): + Optional. Required properties of Type.OBJECT. + """ + + type_: "Type" = proto.Field( + proto.ENUM, + number=1, + enum="Type", + ) + format_: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + nullable: bool = proto.Field( + proto.BOOL, + number=4, + ) + enum: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + items: "Schema" = proto.Field( + proto.MESSAGE, + number=6, + optional=True, + message="Schema", + ) + properties: MutableMapping[str, "Schema"] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=7, + message="Schema", + ) + required: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=8, + ) + + +class GroundingPassage(proto.Message): + r"""Passage included inline with a grounding configuration. + + Attributes: + id (str): + Identifier for the passage for attributing + this passage in grounded answers. + content (google.ai.generativelanguage_v1beta.types.Content): + Content of the passage. + """ + + id: str = proto.Field( + proto.STRING, + number=1, + ) + content: "Content" = proto.Field( + proto.MESSAGE, + number=2, + message="Content", + ) + + +class GroundingPassages(proto.Message): + r"""A repeated list of passages. + + Attributes: + passages (MutableSequence[google.ai.generativelanguage_v1beta.types.GroundingPassage]): + List of passages. 
+ """ + + passages: MutableSequence["GroundingPassage"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="GroundingPassage", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/discuss_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/discuss_service.py new file mode 100644 index 000000000000..18ba818aaf49 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/discuss_service.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.ai.generativelanguage_v1beta.types import citation, safety + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "GenerateMessageRequest", + "GenerateMessageResponse", + "Message", + "MessagePrompt", + "Example", + "CountMessageTokensRequest", + "CountMessageTokensResponse", + }, +) + + +class GenerateMessageRequest(proto.Message): + r"""Request to generate a message response from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + model (str): + Required. The name of the model to use. + + Format: ``name=models/{model}``. + prompt (google.ai.generativelanguage_v1beta.types.MessagePrompt): + Required. The structured textual input given + to the model as a prompt. + Given a + prompt, the model will return what it predicts + is the next message in the discussion. + temperature (float): + Optional. Controls the randomness of the output. + + Values can range over ``[0.0,1.0]``, inclusive. A value + closer to ``1.0`` will produce responses that are more + varied, while a value closer to ``0.0`` will typically + result in less surprising responses from the model. + + This field is a member of `oneof`_ ``_temperature``. + candidate_count (int): + Optional. The number of generated response messages to + return. + + This value must be between ``[1, 8]``, inclusive. If unset, + this will default to ``1``. + + This field is a member of `oneof`_ ``_candidate_count``. + top_p (float): + Optional. The maximum cumulative probability of tokens to + consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Nucleus sampling considers the smallest set of tokens whose + probability sum is at least ``top_p``. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. + + This field is a member of `oneof`_ ``_top_k``. 
+ """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + prompt: "MessagePrompt" = proto.Field( + proto.MESSAGE, + number=2, + message="MessagePrompt", + ) + temperature: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + candidate_count: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=5, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=6, + optional=True, + ) + + +class GenerateMessageResponse(proto.Message): + r"""The response from the model. + + This includes candidate messages and + conversation history in the form of chronologically-ordered + messages. + + Attributes: + candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.Message]): + Candidate response messages from the model. + messages (MutableSequence[google.ai.generativelanguage_v1beta.types.Message]): + The conversation history used by the model. + filters (MutableSequence[google.ai.generativelanguage_v1beta.types.ContentFilter]): + A set of content filtering metadata for the prompt and + response text. + + This indicates which ``SafetyCategory``\ (s) blocked a + candidate from this response, the lowest ``HarmProbability`` + that triggered a block, and the HarmThreshold setting for + that category. + """ + + candidates: MutableSequence["Message"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Message", + ) + messages: MutableSequence["Message"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Message", + ) + filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=safety.ContentFilter, + ) + + +class Message(proto.Message): + r"""The base unit of structured text. + + A ``Message`` includes an ``author`` and the ``content`` of the + ``Message``. + + The ``author`` is used to tag messages when they are fed to the + model as text. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + author (str): + Optional. The author of this Message. + + This serves as a key for tagging + the content of this Message when it is fed to + the model as text. + + The author can be any alphanumeric string. + content (str): + Required. The text content of the structured ``Message``. + citation_metadata (google.ai.generativelanguage_v1beta.types.CitationMetadata): + Output only. Citation information for model-generated + ``content`` in this ``Message``. + + If this ``Message`` was generated as output from the model, + this field may be populated with attribution information for + any text included in the ``content``. This field is used + only on output. + + This field is a member of `oneof`_ ``_citation_metadata``. + """ + + author: str = proto.Field( + proto.STRING, + number=1, + ) + content: str = proto.Field( + proto.STRING, + number=2, + ) + citation_metadata: citation.CitationMetadata = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message=citation.CitationMetadata, + ) + + +class MessagePrompt(proto.Message): + r"""All of the structured input text passed to the model as a prompt. + + A ``MessagePrompt`` contains a structured set of fields that provide + context for the conversation, examples of user input/model output + message pairs that prime the model to respond in different ways, and + the conversation history or list of messages representing the + alternating turns of the conversation between the user and the + model. 
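A hedged sketch of a ``GenerateMessageRequest`` using the sampling controls documented above; the model name is illustrative and the prompt is kept minimal.

.. code-block:: python

    from google.ai.generativelanguage_v1beta.types import (
        GenerateMessageRequest,
        Message,
        MessagePrompt,
    )

    request = GenerateMessageRequest(
        model="models/chat-bison-001",
        prompt=MessagePrompt(messages=[Message(content="Hello there!")]),
        temperature=0.2,      # favour less surprising responses
        candidate_count=2,    # ask for two alternative replies
        top_p=0.95,
        top_k=40,
    )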
+ + Attributes: + context (str): + Optional. Text that should be provided to the model first to + ground the response. + + If not empty, this ``context`` will be given to the model + first before the ``examples`` and ``messages``. When using a + ``context`` be sure to provide it with every request to + maintain continuity. + + This field can be a description of your prompt to the model + to help provide context and guide the responses. Examples: + "Translate the phrase from English to French." or "Given a + statement, classify the sentiment as happy, sad or neutral." + + Anything included in this field will take precedence over + message history if the total input size exceeds the model's + ``input_token_limit`` and the input request is truncated. + examples (MutableSequence[google.ai.generativelanguage_v1beta.types.Example]): + Optional. Examples of what the model should generate. + + This includes both user input and the response that the + model should emulate. + + These ``examples`` are treated identically to conversation + messages except that they take precedence over the history + in ``messages``: If the total input size exceeds the model's + ``input_token_limit`` the input will be truncated. Items + will be dropped from ``messages`` before ``examples``. + messages (MutableSequence[google.ai.generativelanguage_v1beta.types.Message]): + Required. A snapshot of the recent conversation history + sorted chronologically. + + Turns alternate between two authors. + + If the total input size exceeds the model's + ``input_token_limit`` the input will be truncated: The + oldest items will be dropped from ``messages``. + """ + + context: str = proto.Field( + proto.STRING, + number=1, + ) + examples: MutableSequence["Example"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Example", + ) + messages: MutableSequence["Message"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="Message", + ) + + +class Example(proto.Message): + r"""An input/output example used to instruct the Model. + + It demonstrates how the model should respond or format its + response. + + Attributes: + input (google.ai.generativelanguage_v1beta.types.Message): + Required. An example of an input ``Message`` from the user. + output (google.ai.generativelanguage_v1beta.types.Message): + Required. An example of what the model should + output given the input. + """ + + input: "Message" = proto.Field( + proto.MESSAGE, + number=1, + message="Message", + ) + output: "Message" = proto.Field( + proto.MESSAGE, + number=2, + message="Message", + ) + + +class CountMessageTokensRequest(proto.Message): + r"""Counts the number of tokens in the ``prompt`` sent to a model. + + Models may tokenize text differently, so each model may return a + different ``token_count``. + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + prompt (google.ai.generativelanguage_v1beta.types.MessagePrompt): + Required. The prompt, whose token count is to + be returned. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + prompt: "MessagePrompt" = proto.Field( + proto.MESSAGE, + number=2, + message="MessagePrompt", + ) + + +class CountMessageTokensResponse(proto.Message): + r"""A response from ``CountMessageTokens``. + + It returns the model's ``token_count`` for the ``prompt``. 
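A sketch of a ``MessagePrompt`` combining grounding ``context``, one priming ``Example``, and the running conversation ``messages``; all text is illustrative.

.. code-block:: python

    from google.ai.generativelanguage_v1beta.types import Example, Message, MessagePrompt

    prompt = MessagePrompt(
        context="You are a concise travel guide for France.",
        examples=[
            Example(
                input=Message(content="Suggest a day trip from Paris."),
                output=Message(content="Visit Giverny to see Monet's gardens."),
            )
        ],
        messages=[
            Message(author="user", content="Where should I go in autumn?"),
        ],
    )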
+ + Attributes: + token_count (int): + The number of tokens that the ``model`` tokenizes the + ``prompt`` into. + + Always non-negative. + """ + + token_count: int = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/generative_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/generative_service.py new file mode 100644 index 000000000000..2f83c248b269 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/generative_service.py @@ -0,0 +1,952 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.ai.generativelanguage_v1beta.types import citation +from google.ai.generativelanguage_v1beta.types import content as gag_content +from google.ai.generativelanguage_v1beta.types import retriever, safety + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "TaskType", + "GenerateContentRequest", + "GenerationConfig", + "SemanticRetrieverConfig", + "GenerateContentResponse", + "Candidate", + "AttributionSourceId", + "GroundingAttribution", + "GenerateAnswerRequest", + "GenerateAnswerResponse", + "EmbedContentRequest", + "ContentEmbedding", + "EmbedContentResponse", + "BatchEmbedContentsRequest", + "BatchEmbedContentsResponse", + "CountTokensRequest", + "CountTokensResponse", + }, +) + + +class TaskType(proto.Enum): + r"""Type of task for which the embedding will be used. + + Values: + TASK_TYPE_UNSPECIFIED (0): + Unset value, which will default to one of the + other enum values. + RETRIEVAL_QUERY (1): + Specifies the given text is a query in a + search/retrieval setting. + RETRIEVAL_DOCUMENT (2): + Specifies the given text is a document from + the corpus being searched. + SEMANTIC_SIMILARITY (3): + Specifies the given text will be used for + STS. + CLASSIFICATION (4): + Specifies that the given text will be + classified. + CLUSTERING (5): + Specifies that the embeddings will be used + for clustering. + """ + TASK_TYPE_UNSPECIFIED = 0 + RETRIEVAL_QUERY = 1 + RETRIEVAL_DOCUMENT = 2 + SEMANTIC_SIMILARITY = 3 + CLASSIFICATION = 4 + CLUSTERING = 5 + + +class GenerateContentRequest(proto.Message): + r"""Request to generate a completion from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + model (str): + Required. The name of the ``Model`` to use for generating + the completion. + + Format: ``name=models/{model}``. + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a single + instance. 
For multi-turn queries, this is a + repeated field that contains conversation + history + latest request. + tools (MutableSequence[google.ai.generativelanguage_v1beta.types.Tool]): + Optional. A list of ``Tools`` the model may use to generate + the next response. + + A ``Tool`` is a piece of code that enables the system to + interact with external systems to perform an action, or set + of actions, outside of knowledge and scope of the model. The + only supported tool is currently ``Function``. + safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]): + Optional. A list of unique ``SafetySetting`` instances for + blocking unsafe content. + + This will be enforced on the + ``GenerateContentRequest.contents`` and + ``GenerateContentResponse.candidates``. There should not be + more than one setting for each ``SafetyCategory`` type. The + API will block any contents and responses that fail to meet + the thresholds set by these settings. This list overrides + the default settings for each ``SafetyCategory`` specified + in the safety_settings. If there is no ``SafetySetting`` for + a given ``SafetyCategory`` provided in the list, the API + will use the default safety setting for that category. Harm + categories HARM_CATEGORY_HATE_SPEECH, + HARM_CATEGORY_SEXUALLY_EXPLICIT, + HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT + are supported. + generation_config (google.ai.generativelanguage_v1beta.types.GenerationConfig): + Optional. Configuration options for model + generation and outputs. + + This field is a member of `oneof`_ ``_generation_config``. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + contents: MutableSequence[gag_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + tools: MutableSequence[gag_content.Tool] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=gag_content.Tool, + ) + safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=safety.SafetySetting, + ) + generation_config: "GenerationConfig" = proto.Field( + proto.MESSAGE, + number=4, + optional=True, + message="GenerationConfig", + ) + + +class GenerationConfig(proto.Message): + r"""Configuration options for model generation and outputs. Not + all parameters may be configurable for every model. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + candidate_count (int): + Optional. Number of generated responses to return. + + This value must be between [1, 8], inclusive. If unset, this + will default to 1. + + This field is a member of `oneof`_ ``_candidate_count``. + stop_sequences (MutableSequence[str]): + Optional. The set of character sequences (up + to 5) that will stop output generation. If + specified, the API will stop at the first + appearance of a stop sequence. The stop sequence + will not be included as part of the response. + max_output_tokens (int): + Optional. The maximum number of tokens to include in a + candidate. + + If unset, this will default to output_token_limit specified + in the ``Model`` specification. + + This field is a member of `oneof`_ ``_max_output_tokens``. + temperature (float): + Optional. Controls the randomness of the output. Note: The + default value varies by model, see the ``Model.temperature`` + attribute of the ``Model`` returned the ``getModel`` + function. + + Values can range from [0.0,1.0], inclusive. 
A value closer + to 1.0 will produce responses that are more varied and + creative, while a value closer to 0.0 will typically result + in more straightforward responses from the model. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + Optional. The maximum cumulative probability of tokens to + consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Tokens are sorted based on their assigned probabilities so + that only the most likely tokens are considered. Top-k + sampling directly limits the maximum number of tokens to + consider, while Nucleus sampling limits number of tokens + based on the cumulative probability. + + Note: The default value varies by model, see the + ``Model.top_p`` attribute of the ``Model`` returned the + ``getModel`` function. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. Defaults to 40. + + Note: The default value varies by model, see the + ``Model.top_k`` attribute of the ``Model`` returned the + ``getModel`` function. + + This field is a member of `oneof`_ ``_top_k``. + """ + + candidate_count: int = proto.Field( + proto.INT32, + number=1, + optional=True, + ) + stop_sequences: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + max_output_tokens: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + temperature: float = proto.Field( + proto.FLOAT, + number=5, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=6, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=7, + optional=True, + ) + + +class SemanticRetrieverConfig(proto.Message): + r"""Configuration for retrieving grounding content from a ``Corpus`` or + ``Document`` created using the Semantic Retriever API. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + source (str): + Required. Name of the resource for retrieval, + e.g. corpora/123 or corpora/123/documents/abc. + query (google.ai.generativelanguage_v1beta.types.Content): + Required. Query to use for similarity matching ``Chunk``\ s + in the given resource. + metadata_filters (MutableSequence[google.ai.generativelanguage_v1beta.types.MetadataFilter]): + Optional. Filters for selecting ``Document``\ s and/or + ``Chunk``\ s from the resource. + max_chunks_count (int): + Optional. Maximum number of relevant ``Chunk``\ s to + retrieve. + + This field is a member of `oneof`_ ``_max_chunks_count``. + minimum_relevance_score (float): + Optional. Minimum relevance score for retrieved relevant + ``Chunk``\ s. + + This field is a member of `oneof`_ ``_minimum_relevance_score``. + """ + + source: str = proto.Field( + proto.STRING, + number=1, + ) + query: gag_content.Content = proto.Field( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=retriever.MetadataFilter, + ) + max_chunks_count: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + minimum_relevance_score: float = proto.Field( + proto.FLOAT, + number=5, + optional=True, + ) + + +class GenerateContentResponse(proto.Message): + r"""Response from the model supporting multiple candidates. 
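+
+    For illustration only (not generated documentation), a caller holding a
+    hypothetical ``GenerativeServiceClient`` named ``client`` and a prepared
+    ``request`` might read the response like this::
+
+        response = client.generate_content(request=request)
+
+        # If the prompt itself was blocked, no candidates are returned.
+        if response.prompt_feedback.block_reason:
+            print("Prompt blocked:", response.prompt_feedback.block_reason)
+
+        for candidate in response.candidates:
+            print(candidate.finish_reason, candidate.content.parts[0].text)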
+ + Note on safety ratings and content filtering. They are reported for + both prompt in ``GenerateContentResponse.prompt_feedback`` and for + each candidate in ``finish_reason`` and in ``safety_ratings``. The + API contract is that: + + - either all requested candidates are returned or no candidates at + all + - no candidates are returned only if there was something wrong with + the prompt (see ``prompt_feedback``) + - feedback on each candidate is reported on ``finish_reason`` and + ``safety_ratings``. + + Attributes: + candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.Candidate]): + Candidate responses from the model. + prompt_feedback (google.ai.generativelanguage_v1beta.types.GenerateContentResponse.PromptFeedback): + Returns the prompt's feedback related to the + content filters. + """ + + class PromptFeedback(proto.Message): + r"""A set of the feedback metadata the prompt specified in + ``GenerateContentRequest.content``. + + Attributes: + block_reason (google.ai.generativelanguage_v1beta.types.GenerateContentResponse.PromptFeedback.BlockReason): + Optional. If set, the prompt was blocked and + no candidates are returned. Rephrase your + prompt. + safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]): + Ratings for safety of the prompt. + There is at most one rating per category. + """ + + class BlockReason(proto.Enum): + r"""Specifies what was the reason why prompt was blocked. + + Values: + BLOCK_REASON_UNSPECIFIED (0): + Default value. This value is unused. + SAFETY (1): + Prompt was blocked due to safety reasons. You can inspect + ``safety_ratings`` to understand which safety category + blocked it. + OTHER (2): + Prompt was blocked due to unknown reaasons. + """ + BLOCK_REASON_UNSPECIFIED = 0 + SAFETY = 1 + OTHER = 2 + + block_reason: "GenerateContentResponse.PromptFeedback.BlockReason" = ( + proto.Field( + proto.ENUM, + number=1, + enum="GenerateContentResponse.PromptFeedback.BlockReason", + ) + ) + safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=safety.SafetyRating, + ) + + candidates: MutableSequence["Candidate"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Candidate", + ) + prompt_feedback: PromptFeedback = proto.Field( + proto.MESSAGE, + number=2, + message=PromptFeedback, + ) + + +class Candidate(proto.Message): + r"""A response candidate generated from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + index (int): + Output only. Index of the candidate in the + list of candidates. + + This field is a member of `oneof`_ ``_index``. + content (google.ai.generativelanguage_v1beta.types.Content): + Output only. Generated content returned from + the model. + finish_reason (google.ai.generativelanguage_v1beta.types.Candidate.FinishReason): + Optional. Output only. The reason why the + model stopped generating tokens. + If empty, the model has not stopped generating + the tokens. + safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]): + List of ratings for the safety of a response + candidate. + There is at most one rating per category. + citation_metadata (google.ai.generativelanguage_v1beta.types.CitationMetadata): + Output only. Citation information for model-generated + candidate. + + This field may be populated with recitation information for + any text included in the ``content``. 
These are passages + that are "recited" from copyrighted material in the + foundational LLM's training data. + token_count (int): + Output only. Token count for this candidate. + grounding_attributions (MutableSequence[google.ai.generativelanguage_v1beta.types.GroundingAttribution]): + Output only. Attribution information for sources that + contributed to a grounded answer. + + This field is populated for ``GenerateAnswer`` calls. + """ + + class FinishReason(proto.Enum): + r"""Defines the reason why the model stopped generating tokens. + + Values: + FINISH_REASON_UNSPECIFIED (0): + Default value. This value is unused. + STOP (1): + Natural stop point of the model or provided + stop sequence. + MAX_TOKENS (2): + The maximum number of tokens as specified in + the request was reached. + SAFETY (3): + The candidate content was flagged for safety + reasons. + RECITATION (4): + The candidate content was flagged for + recitation reasons. + OTHER (5): + Unknown reason. + """ + FINISH_REASON_UNSPECIFIED = 0 + STOP = 1 + MAX_TOKENS = 2 + SAFETY = 3 + RECITATION = 4 + OTHER = 5 + + index: int = proto.Field( + proto.INT32, + number=3, + optional=True, + ) + content: gag_content.Content = proto.Field( + proto.MESSAGE, + number=1, + message=gag_content.Content, + ) + finish_reason: FinishReason = proto.Field( + proto.ENUM, + number=2, + enum=FinishReason, + ) + safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=safety.SafetyRating, + ) + citation_metadata: citation.CitationMetadata = proto.Field( + proto.MESSAGE, + number=6, + message=citation.CitationMetadata, + ) + token_count: int = proto.Field( + proto.INT32, + number=7, + ) + grounding_attributions: MutableSequence[ + "GroundingAttribution" + ] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message="GroundingAttribution", + ) + + +class AttributionSourceId(proto.Message): + r"""Identifier for the source contributing to this attribution. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + grounding_passage (google.ai.generativelanguage_v1beta.types.AttributionSourceId.GroundingPassageId): + Identifier for an inline passage. + + This field is a member of `oneof`_ ``source``. + semantic_retriever_chunk (google.ai.generativelanguage_v1beta.types.AttributionSourceId.SemanticRetrieverChunk): + Identifier for a ``Chunk`` fetched via Semantic Retriever. + + This field is a member of `oneof`_ ``source``. + """ + + class GroundingPassageId(proto.Message): + r"""Identifier for a part within a ``GroundingPassage``. + + Attributes: + passage_id (str): + Output only. ID of the passage matching the + ``GenerateAnswerRequest``'s ``GroundingPassage.id``. + part_index (int): + Output only. Index of the part within the + ``GenerateAnswerRequest``'s ``GroundingPassage.content``. + """ + + passage_id: str = proto.Field( + proto.STRING, + number=1, + ) + part_index: int = proto.Field( + proto.INT32, + number=2, + ) + + class SemanticRetrieverChunk(proto.Message): + r"""Identifier for a ``Chunk`` retrieved via Semantic Retriever + specified in the ``GenerateAnswerRequest`` using + ``SemanticRetrieverConfig``. + + Attributes: + source (str): + Output only. 
Name of the source matching the request's + ``SemanticRetrieverConfig.source``. Example: ``corpora/123`` + or ``corpora/123/documents/abc`` + chunk (str): + Output only. Name of the ``Chunk`` containing the attributed + text. Example: ``corpora/123/documents/abc/chunks/xyz`` + """ + + source: str = proto.Field( + proto.STRING, + number=1, + ) + chunk: str = proto.Field( + proto.STRING, + number=2, + ) + + grounding_passage: GroundingPassageId = proto.Field( + proto.MESSAGE, + number=1, + oneof="source", + message=GroundingPassageId, + ) + semantic_retriever_chunk: SemanticRetrieverChunk = proto.Field( + proto.MESSAGE, + number=2, + oneof="source", + message=SemanticRetrieverChunk, + ) + + +class GroundingAttribution(proto.Message): + r"""Attribution for a source that contributed to an answer. + + Attributes: + source_id (google.ai.generativelanguage_v1beta.types.AttributionSourceId): + Output only. Identifier for the source + contributing to this attribution. + content (google.ai.generativelanguage_v1beta.types.Content): + Grounding source content that makes up this + attribution. + """ + + source_id: "AttributionSourceId" = proto.Field( + proto.MESSAGE, + number=3, + message="AttributionSourceId", + ) + content: gag_content.Content = proto.Field( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + + +class GenerateAnswerRequest(proto.Message): + r"""Request to generate a grounded answer from the model. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + inline_passages (google.ai.generativelanguage_v1beta.types.GroundingPassages): + Passages provided inline with the request. + + This field is a member of `oneof`_ ``grounding_source``. + semantic_retriever (google.ai.generativelanguage_v1beta.types.SemanticRetrieverConfig): + Content retrieved from resources created via + the Semantic Retriever API. + + This field is a member of `oneof`_ ``grounding_source``. + model (str): + Required. The name of the ``Model`` to use for generating + the grounded response. + + Format: ``model=models/{model}``. + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The content of the current conversation with the + model. For single-turn queries, this is a single question to + answer. For multi-turn queries, this is a repeated field + that contains conversation history and the last ``Content`` + in the list containing the question. + answer_style (google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest.AnswerStyle): + Required. Style in which answers should be + returned. + safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]): + Optional. A list of unique ``SafetySetting`` instances for + blocking unsafe content. + + This will be enforced on the + ``GenerateAnswerRequest.contents`` and + ``GenerateAnswerResponse.candidate``. There should not be + more than one setting for each ``SafetyCategory`` type. The + API will block any contents and responses that fail to meet + the thresholds set by these settings. This list overrides + the default settings for each ``SafetyCategory`` specified + in the safety_settings. 
If there is no ``SafetySetting`` for + a given ``SafetyCategory`` provided in the list, the API + will use the default safety setting for that category. + temperature (float): + Optional. Controls the randomness of the output. + + Values can range from [0.0,1.0], inclusive. A value closer + to 1.0 will produce responses that are more varied and + creative, while a value closer to 0.0 will typically result + in more straightforward responses from the model. A low + temperature (~0.2) is usually recommended for + Attributed-Question-Answering use cases. + + This field is a member of `oneof`_ ``_temperature``. + """ + + class AnswerStyle(proto.Enum): + r"""Style for grounded answers. + + Values: + ANSWER_STYLE_UNSPECIFIED (0): + Unspecified answer style. + ABSTRACTIVE (1): + Succint but abstract style. + EXTRACTIVE (2): + Very brief and extractive style. + VERBOSE (3): + Verbose style including extra details. The + response may be formatted as a sentence, + paragraph, multiple paragraphs, or bullet + points, etc. + """ + ANSWER_STYLE_UNSPECIFIED = 0 + ABSTRACTIVE = 1 + EXTRACTIVE = 2 + VERBOSE = 3 + + inline_passages: gag_content.GroundingPassages = proto.Field( + proto.MESSAGE, + number=6, + oneof="grounding_source", + message=gag_content.GroundingPassages, + ) + semantic_retriever: "SemanticRetrieverConfig" = proto.Field( + proto.MESSAGE, + number=7, + oneof="grounding_source", + message="SemanticRetrieverConfig", + ) + model: str = proto.Field( + proto.STRING, + number=1, + ) + contents: MutableSequence[gag_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + answer_style: AnswerStyle = proto.Field( + proto.ENUM, + number=5, + enum=AnswerStyle, + ) + safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=safety.SafetySetting, + ) + temperature: float = proto.Field( + proto.FLOAT, + number=4, + optional=True, + ) + + +class GenerateAnswerResponse(proto.Message): + r"""Response from the model for a grounded answer. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + answer (google.ai.generativelanguage_v1beta.types.Candidate): + Candidate answer from the model. + + Note: The model *always* attempts to provide a grounded + answer, even when the answer is unlikely to be answerable + from the given passages. In that case, a low-quality or + ungrounded answer may be provided, along with a low + ``answerable_probability``. + answerable_probability (float): + Output only. The model's estimate of the probability that + its answer is correct and grounded in the input passages. + + A low answerable_probability indicates that the answer might + not be grounded in the sources. + + When ``answerable_probability`` is low, some clients may + wish to: + + - Display a message to the effect of "We couldn’t answer + that question" to the user. + - Fall back to a general-purpose LLM that answers the + question from world knowledge. The threshold and nature + of such fallbacks will depend on individual clients’ use + cases. 0.5 is a good starting threshold. + + This field is a member of `oneof`_ ``_answerable_probability``. 
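+
+    For illustration only, a client-side fallback along the lines described
+    above might look like this (``client`` and ``request`` stand for a
+    prepared ``GenerativeServiceClient`` and ``GenerateAnswerRequest``; the
+    0.5 cutoff is just the suggested starting threshold)::
+
+        response = client.generate_answer(request=request)
+
+        if response.answerable_probability < 0.5:
+            # The answer is likely not grounded in the supplied passages.
+            print("We couldn't answer that question from the given sources.")
+        else:
+            print(response.answer.content.parts[0].text)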
+ """ + + answer: "Candidate" = proto.Field( + proto.MESSAGE, + number=1, + message="Candidate", + ) + answerable_probability: float = proto.Field( + proto.FLOAT, + number=2, + optional=True, + ) + + +class EmbedContentRequest(proto.Message): + r"""Request containing the ``Content`` for the model to embed. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + content (google.ai.generativelanguage_v1beta.types.Content): + Required. The content to embed. Only the ``parts.text`` + fields will be counted. + task_type (google.ai.generativelanguage_v1beta.types.TaskType): + Optional. Optional task type for which the embeddings will + be used. Can only be set for ``models/embedding-001``. + + This field is a member of `oneof`_ ``_task_type``. + title (str): + Optional. An optional title for the text. Only applicable + when TaskType is ``RETRIEVAL_DOCUMENT``. + + This field is a member of `oneof`_ ``_title``. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + content: gag_content.Content = proto.Field( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + task_type: "TaskType" = proto.Field( + proto.ENUM, + number=3, + optional=True, + enum="TaskType", + ) + title: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class ContentEmbedding(proto.Message): + r"""A list of floats representing an embedding. + + Attributes: + values (MutableSequence[float]): + The embedding values. + """ + + values: MutableSequence[float] = proto.RepeatedField( + proto.FLOAT, + number=1, + ) + + +class EmbedContentResponse(proto.Message): + r"""The response to an ``EmbedContentRequest``. + + Attributes: + embedding (google.ai.generativelanguage_v1beta.types.ContentEmbedding): + Output only. The embedding generated from the + input content. + """ + + embedding: "ContentEmbedding" = proto.Field( + proto.MESSAGE, + number=1, + message="ContentEmbedding", + ) + + +class BatchEmbedContentsRequest(proto.Message): + r"""Batch request to get embeddings from the model for a list of + prompts. + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + requests (MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedContentRequest]): + Required. Embed requests for the batch. The model in each of + these requests must match the model specified + ``BatchEmbedContentsRequest.model``. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + requests: MutableSequence["EmbedContentRequest"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="EmbedContentRequest", + ) + + +class BatchEmbedContentsResponse(proto.Message): + r"""The response to a ``BatchEmbedContentsRequest``. + + Attributes: + embeddings (MutableSequence[google.ai.generativelanguage_v1beta.types.ContentEmbedding]): + Output only. The embeddings for each request, + in the same order as provided in the batch + request. 
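+
+    As a sketch only, assuming a ``GenerativeServiceClient`` named ``client``
+    and placeholder texts, the ordering guarantee lets callers zip inputs
+    with their embeddings::
+
+        from google.ai import generativelanguage_v1beta as glm
+
+        texts = ["What is a corpus?", "How are chunks stored?"]
+        request = glm.BatchEmbedContentsRequest(
+            model="models/embedding-001",
+            requests=[
+                glm.EmbedContentRequest(
+                    model="models/embedding-001",
+                    content=glm.Content(parts=[glm.Part(text=text)]),
+                )
+                for text in texts
+            ],
+        )
+
+        response = client.batch_embed_contents(request=request)
+        for text, embedding in zip(texts, response.embeddings):
+            print(text, len(embedding.values))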
+ """ + + embeddings: MutableSequence["ContentEmbedding"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ContentEmbedding", + ) + + +class CountTokensRequest(proto.Message): + r"""Counts the number of tokens in the ``prompt`` sent to a model. + + Models may tokenize text differently, so each model may return a + different ``token_count``. + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]): + Required. The input given to the model as a + prompt. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + contents: MutableSequence[gag_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gag_content.Content, + ) + + +class CountTokensResponse(proto.Message): + r"""A response from ``CountTokens``. + + It returns the model's ``token_count`` for the ``prompt``. + + Attributes: + total_tokens (int): + The number of tokens that the ``model`` tokenizes the + ``prompt`` into. + + Always non-negative. + """ + + total_tokens: int = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model.py new file mode 100644 index 000000000000..977380fb6418 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "Model", + }, +) + + +class Model(proto.Message): + r"""Information about a Generative Language Model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. The resource name of the ``Model``. + + Format: ``models/{model}`` with a ``{model}`` naming + convention of: + + - "{base_model_id}-{version}" + + Examples: + + - ``models/chat-bison-001`` + base_model_id (str): + Required. The name of the base model, pass this to the + generation request. + + Examples: + + - ``chat-bison`` + version (str): + Required. The version number of the model. + + This represents the major version + display_name (str): + The human-readable name of the model. E.g. + "Chat Bison". + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + description (str): + A short description of the model. + input_token_limit (int): + Maximum number of input tokens allowed for + this model. 
+ output_token_limit (int): + Maximum number of output tokens available for + this model. + supported_generation_methods (MutableSequence[str]): + The model's supported generation methods. + + The method names are defined as Pascal case strings, such as + ``generateMessage`` which correspond to API methods. + temperature (float): + Controls the randomness of the output. + + Values can range over ``[0.0,1.0]``, inclusive. A value + closer to ``1.0`` will produce responses that are more + varied, while a value closer to ``0.0`` will typically + result in less surprising responses from the model. This + value specifies default to be used by the backend while + making the call to the model. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + For Nucleus sampling. + + Nucleus sampling considers the smallest set of tokens whose + probability sum is at least ``top_p``. This value specifies + default to be used by the backend while making the call to + the model. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + For Top-k sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. This value specifies default to be used by the + backend while making the call to the model. + + This field is a member of `oneof`_ ``_top_k``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + base_model_id: str = proto.Field( + proto.STRING, + number=2, + ) + version: str = proto.Field( + proto.STRING, + number=3, + ) + display_name: str = proto.Field( + proto.STRING, + number=4, + ) + description: str = proto.Field( + proto.STRING, + number=5, + ) + input_token_limit: int = proto.Field( + proto.INT32, + number=6, + ) + output_token_limit: int = proto.Field( + proto.INT32, + number=7, + ) + supported_generation_methods: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=8, + ) + temperature: float = proto.Field( + proto.FLOAT, + number=9, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=10, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=11, + optional=True, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model_service.py new file mode 100644 index 000000000000..ed2043541989 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/model_service.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import field_mask_pb2 # type: ignore +import proto # type: ignore + +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "GetModelRequest", + "ListModelsRequest", + "ListModelsResponse", + "GetTunedModelRequest", + "ListTunedModelsRequest", + "ListTunedModelsResponse", + "CreateTunedModelRequest", + "CreateTunedModelMetadata", + "UpdateTunedModelRequest", + "DeleteTunedModelRequest", + }, +) + + +class GetModelRequest(proto.Message): + r"""Request for getting information about a specific Model. + + Attributes: + name (str): + Required. The resource name of the model. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request for listing all Models. + + Attributes: + page_size (int): + The maximum number of ``Models`` to return (per page). + + The service may return fewer models. If unspecified, at most + 50 models will be returned per page. This method returns at + most 1000 models per page, even if you pass a larger + page_size. + page_token (str): + A page token, received from a previous ``ListModels`` call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListModels`` must match the call that provided the page + token. + """ + + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListModelsResponse(proto.Message): + r"""Response from ``ListModel`` containing a paginated list of Models. + + Attributes: + models (MutableSequence[google.ai.generativelanguage_v1beta.types.Model]): + The returned Models. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. + + If this field is omitted, there are no more pages. + """ + + @property + def raw_page(self): + return self + + models: MutableSequence[model.Model] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model.Model, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetTunedModelRequest(proto.Message): + r"""Request for getting information about a specific Model. + + Attributes: + name (str): + Required. The resource name of the model. + + Format: ``tunedModels/my-model-id`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTunedModelsRequest(proto.Message): + r"""Request for listing TunedModels. + + Attributes: + page_size (int): + Optional. The maximum number of ``TunedModels`` to return + (per page). The service may return fewer tuned models. + + If unspecified, at most 10 tuned models will be returned. + This method returns at most 1000 models per page, even if + you pass a larger page_size. + page_token (str): + Optional. A page token, received from a previous + ``ListTunedModels`` call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. 
+ + When paginating, all other parameters provided to + ``ListTunedModels`` must match the call that provided the + page token. + filter (str): + Optional. A filter is a full text search over + the tuned model's description and display name. + By default, results will not include tuned + models shared with everyone. + + Additional operators: + + - owner:me + - writers:me + - readers:me + - readers:everyone + + Examples: + + "owner:me" returns all tuned models to which + caller has owner role "readers:me" returns all + tuned models to which caller has reader role + "readers:everyone" returns all tuned models that + are shared with everyone + """ + + page_size: int = proto.Field( + proto.INT32, + number=1, + ) + page_token: str = proto.Field( + proto.STRING, + number=2, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListTunedModelsResponse(proto.Message): + r"""Response from ``ListTunedModels`` containing a paginated list of + Models. + + Attributes: + tuned_models (MutableSequence[google.ai.generativelanguage_v1beta.types.TunedModel]): + The returned Models. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. + + If this field is omitted, there are no more pages. + """ + + @property + def raw_page(self): + return self + + tuned_models: MutableSequence[gag_tuned_model.TunedModel] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gag_tuned_model.TunedModel, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTunedModelRequest(proto.Message): + r"""Request to create a TunedModel. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tuned_model_id (str): + Optional. The unique id for the tuned model if specified. + This value should be up to 40 characters, the first + character must be a letter, the last could be a letter or a + number. The id must match the regular expression: + `a-z <[a-z0-9-]{0,38}[a-z0-9]>`__?. + + This field is a member of `oneof`_ ``_tuned_model_id``. + tuned_model (google.ai.generativelanguage_v1beta.types.TunedModel): + Required. The tuned model to create. + """ + + tuned_model_id: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + tuned_model: gag_tuned_model.TunedModel = proto.Field( + proto.MESSAGE, + number=2, + message=gag_tuned_model.TunedModel, + ) + + +class CreateTunedModelMetadata(proto.Message): + r"""Metadata about the state and progress of creating a tuned + model returned from the long-running operation + + Attributes: + tuned_model (str): + Name of the tuned model associated with the + tuning operation. + total_steps (int): + The total number of tuning steps. + completed_steps (int): + The number of steps completed. + completed_percent (float): + The completed percentage for the tuning + operation. + snapshots (MutableSequence[google.ai.generativelanguage_v1beta.types.TuningSnapshot]): + Metrics collected during tuning. 
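+
+    For illustration only, progress might be surfaced while waiting on the
+    long-running ``CreateTunedModel`` operation; ``operation`` is assumed to
+    be the standard ``google.api_core`` operation wrapper returned by a
+    ``ModelServiceClient.create_tuned_model`` call::
+
+        import time
+
+        while not operation.done():
+            metadata = operation.metadata  # CreateTunedModelMetadata
+            print(
+                f"{metadata.completed_percent:.0f}% complete "
+                f"({metadata.completed_steps}/{metadata.total_steps} steps)"
+            )
+            time.sleep(10)
+
+        tuned_model = operation.result()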
+ """ + + tuned_model: str = proto.Field( + proto.STRING, + number=5, + ) + total_steps: int = proto.Field( + proto.INT32, + number=1, + ) + completed_steps: int = proto.Field( + proto.INT32, + number=2, + ) + completed_percent: float = proto.Field( + proto.FLOAT, + number=3, + ) + snapshots: MutableSequence[gag_tuned_model.TuningSnapshot] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=gag_tuned_model.TuningSnapshot, + ) + + +class UpdateTunedModelRequest(proto.Message): + r"""Request to update a TunedModel. + + Attributes: + tuned_model (google.ai.generativelanguage_v1beta.types.TunedModel): + Required. The tuned model to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. + """ + + tuned_model: gag_tuned_model.TunedModel = proto.Field( + proto.MESSAGE, + number=1, + message=gag_tuned_model.TunedModel, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteTunedModelRequest(proto.Message): + r"""Request to delete a TunedModel. + + Attributes: + name (str): + Required. The resource name of the model. Format: + ``tunedModels/my-model-id`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission.py new file mode 100644 index 000000000000..06f02b924f66 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "Permission", + }, +) + + +class Permission(proto.Message): + r"""Permission resource grants user, group or the rest of the + world access to the PaLM API resource (e.g. a tuned model, + corpus). + + A role is a collection of permitted operations that allows users + to perform specific actions on PaLM API resources. To make them + available to users, groups, or service accounts, you assign + roles. When you assign a role, you grant permissions that the + role contains. + + There are three concentric roles. Each role is a superset of the + previous role's permitted operations: + + - reader can use the resource (e.g. tuned model, corpus) for + inference + - writer has reader's permissions and additionally can edit and + share + - owner has writer's permissions and additionally can delete + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Output only. Identifier. The permission name. 
A unique name + will be generated on create. Examples: + tunedModels/{tuned_model}/permissions/{permission} + corpora/{corpus}/permissions/{permission} Output only. + grantee_type (google.ai.generativelanguage_v1beta.types.Permission.GranteeType): + Optional. Immutable. The type of the grantee. + + This field is a member of `oneof`_ ``_grantee_type``. + email_address (str): + Optional. Immutable. The email address of the + user of group which this permission refers. + Field is not set when permission's grantee type + is EVERYONE. + + This field is a member of `oneof`_ ``_email_address``. + role (google.ai.generativelanguage_v1beta.types.Permission.Role): + Required. The role granted by this + permission. + + This field is a member of `oneof`_ ``_role``. + """ + + class GranteeType(proto.Enum): + r"""Defines types of the grantee of this permission. + + Values: + GRANTEE_TYPE_UNSPECIFIED (0): + The default value. This value is unused. + USER (1): + Represents a user. When set, you must provide email_address + for the user. + GROUP (2): + Represents a group. When set, you must provide email_address + for the group. + EVERYONE (3): + Represents access to everyone. No extra + information is required. + """ + GRANTEE_TYPE_UNSPECIFIED = 0 + USER = 1 + GROUP = 2 + EVERYONE = 3 + + class Role(proto.Enum): + r"""Defines the role granted by this permission. + + Values: + ROLE_UNSPECIFIED (0): + The default value. This value is unused. + OWNER (1): + Owner can use, update, share and delete the + resource. + WRITER (2): + Writer can use, update and share the + resource. + READER (3): + Reader can use the resource. + """ + ROLE_UNSPECIFIED = 0 + OWNER = 1 + WRITER = 2 + READER = 3 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + grantee_type: GranteeType = proto.Field( + proto.ENUM, + number=2, + optional=True, + enum=GranteeType, + ) + email_address: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + role: Role = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum=Role, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission_service.py new file mode 100644 index 000000000000..51be3d944fd4 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/permission_service.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import field_mask_pb2 # type: ignore +import proto # type: ignore + +from google.ai.generativelanguage_v1beta.types import permission as gag_permission + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "CreatePermissionRequest", + "GetPermissionRequest", + "ListPermissionsRequest", + "ListPermissionsResponse", + "UpdatePermissionRequest", + "DeletePermissionRequest", + "TransferOwnershipRequest", + "TransferOwnershipResponse", + }, +) + + +class CreatePermissionRequest(proto.Message): + r"""Request to create a ``Permission``. + + Attributes: + parent (str): + Required. The parent resource of the ``Permission``. + Formats: ``tunedModels/{tuned_model}`` ``corpora/{corpus}`` + permission (google.ai.generativelanguage_v1beta.types.Permission): + Required. The permission to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + permission: gag_permission.Permission = proto.Field( + proto.MESSAGE, + number=2, + message=gag_permission.Permission, + ) + + +class GetPermissionRequest(proto.Message): + r"""Request for getting information about a specific ``Permission``. + + Attributes: + name (str): + Required. The resource name of the permission. + + Formats: + ``tunedModels/{tuned_model}/permissions/{permission}`` + ``corpora/{corpus}/permissions/{permission}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPermissionsRequest(proto.Message): + r"""Request for listing permissions. + + Attributes: + parent (str): + Required. The parent resource of the permissions. Formats: + ``tunedModels/{tuned_model}`` ``corpora/{corpus}`` + page_size (int): + Optional. The maximum number of ``Permission``\ s to return + (per page). The service may return fewer permissions. + + If unspecified, at most 10 permissions will be returned. + This method returns at most 1000 permissions per page, even + if you pass larger page_size. + page_token (str): + Optional. A page token, received from a previous + ``ListPermissions`` call. + + Provide the ``page_token`` returned by one request as an + argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListPermissions`` must match the call that provided the + page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListPermissionsResponse(proto.Message): + r"""Response from ``ListPermissions`` containing a paginated list of + permissions. + + Attributes: + permissions (MutableSequence[google.ai.generativelanguage_v1beta.types.Permission]): + Returned permissions. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. + + If this field is omitted, there are no more pages. + """ + + @property + def raw_page(self): + return self + + permissions: MutableSequence[gag_permission.Permission] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gag_permission.Permission, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdatePermissionRequest(proto.Message): + r"""Request to update the ``Permission``. + + Attributes: + permission (google.ai.generativelanguage_v1beta.types.Permission): + Required. The permission to update. 
+ + The permission's ``name`` field is used to identify the + permission to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Accepted ones: + + - role (``Permission.role`` field) + """ + + permission: gag_permission.Permission = proto.Field( + proto.MESSAGE, + number=1, + message=gag_permission.Permission, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeletePermissionRequest(proto.Message): + r"""Request to delete the ``Permission``. + + Attributes: + name (str): + Required. The resource name of the permission. Formats: + ``tunedModels/{tuned_model}/permissions/{permission}`` + ``corpora/{corpus}/permissions/{permission}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class TransferOwnershipRequest(proto.Message): + r"""Request to transfer the ownership of the tuned model. + + Attributes: + name (str): + Required. The resource name of the tuned model to transfer + ownership. + + Format: ``tunedModels/my-model-id`` + email_address (str): + Required. The email address of the user to + whom the tuned model is being transferred to. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + email_address: str = proto.Field( + proto.STRING, + number=2, + ) + + +class TransferOwnershipResponse(proto.Message): + r"""Response from ``TransferOwnership``.""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever.py new file mode 100644 index 000000000000..3af2c6a1a58c --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "Corpus", + "Document", + "StringList", + "CustomMetadata", + "MetadataFilter", + "Condition", + "Chunk", + "ChunkData", + }, +) + + +class Corpus(proto.Message): + r"""A ``Corpus`` is a collection of ``Document``\ s. A project can + create up to 5 corpora. + + Attributes: + name (str): + Immutable. Identifier. The ``Corpus`` resource name. The ID + (name excluding the "corpora/" prefix) can contain up to 40 + characters that are lowercase alphanumeric or dashes (-). + The ID cannot start or end with a dash. If the name is empty + on create, a unique name will be derived from + ``display_name`` along with a 12 character random suffix. + Example: ``corpora/my-awesome-corpora-123a456b789c`` + display_name (str): + Optional. 
The human-readable display name for the + ``Corpus``. The display name must be no more than 128 + characters in length, including spaces. Example: "Docs on + Semantic Retriever". + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The Timestamp of when the ``Corpus`` was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The Timestamp of when the ``Corpus`` was last + updated. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class Document(proto.Message): + r"""A ``Document`` is a collection of ``Chunk``\ s. A ``Corpus`` can + have a maximum of 10,000 ``Document``\ s. + + Attributes: + name (str): + Immutable. Identifier. The ``Document`` resource name. The + ID (name excluding the `corpora/*/documents/` prefix) can + contain up to 40 characters that are lowercase alphanumeric + or dashes (-). The ID cannot start or end with a dash. If + the name is empty on create, a unique name will be derived + from ``display_name`` along with a 12 character random + suffix. Example: + ``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c`` + display_name (str): + Optional. The human-readable display name for the + ``Document``. The display name must be no more than 512 + characters in length, including spaces. Example: "Semantic + Retriever Documentation". + custom_metadata (MutableSequence[google.ai.generativelanguage_v1beta.types.CustomMetadata]): + Optional. User provided custom metadata stored as key-value + pairs used for querying. A ``Document`` can have a maximum + of 20 ``CustomMetadata``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The Timestamp of when the ``Document`` was last + updated. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The Timestamp of when the ``Document`` was + created. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + custom_metadata: MutableSequence["CustomMetadata"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="CustomMetadata", + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + + +class StringList(proto.Message): + r"""User provided string values assigned to a single metadata + key. + + Attributes: + values (MutableSequence[str]): + The string values of the metadata to store. + """ + + values: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class CustomMetadata(proto.Message): + r"""User provided metadata stored as key-value pairs. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + string_value (str): + The string value of the metadata to store. + + This field is a member of `oneof`_ ``value``. 
+ string_list_value (google.ai.generativelanguage_v1beta.types.StringList): + The StringList value of the metadata to + store. + + This field is a member of `oneof`_ ``value``. + numeric_value (float): + The numeric value of the metadata to store. + + This field is a member of `oneof`_ ``value``. + key (str): + Required. The key of the metadata to store. + """ + + string_value: str = proto.Field( + proto.STRING, + number=2, + oneof="value", + ) + string_list_value: "StringList" = proto.Field( + proto.MESSAGE, + number=6, + oneof="value", + message="StringList", + ) + numeric_value: float = proto.Field( + proto.FLOAT, + number=7, + oneof="value", + ) + key: str = proto.Field( + proto.STRING, + number=1, + ) + + +class MetadataFilter(proto.Message): + r"""User provided filter to limit retrieval based on ``Chunk`` or + ``Document`` level metadata values. Example (genre = drama OR genre + = action): key = "document.custom_metadata.genre" conditions = + [{string_value = "drama", operation = EQUAL}, {string_value = + "action", operation = EQUAL}] + + Attributes: + key (str): + Required. The key of the metadata to filter + on. + conditions (MutableSequence[google.ai.generativelanguage_v1beta.types.Condition]): + Required. The ``Condition``\ s for the given key that will + trigger this filter. Multiple ``Condition``\ s are joined by + logical ORs. + """ + + key: str = proto.Field( + proto.STRING, + number=1, + ) + conditions: MutableSequence["Condition"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Condition", + ) + + +class Condition(proto.Message): + r"""Filter condition applicable to a single key. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + string_value (str): + The string value to filter the metadata on. + + This field is a member of `oneof`_ ``value``. + numeric_value (float): + The numeric value to filter the metadata on. + + This field is a member of `oneof`_ ``value``. + operation (google.ai.generativelanguage_v1beta.types.Condition.Operator): + Required. Operator applied to the given + key-value pair to trigger the condition. + """ + + class Operator(proto.Enum): + r"""Defines the valid operators that can be applied to a + key-value pair. + + Values: + OPERATOR_UNSPECIFIED (0): + The default value. This value is unused. + LESS (1): + Supported by numeric. + LESS_EQUAL (2): + Supported by numeric. + EQUAL (3): + Supported by numeric & string. + GREATER_EQUAL (4): + Supported by numeric. + GREATER (5): + Supported by numeric. + NOT_EQUAL (6): + Supported by numeric & string. + INCLUDES (7): + Supported by string only when ``CustomMetadata`` value type + for the given key has a ``string_list_value``. + EXCLUDES (8): + Supported by string only when ``CustomMetadata`` value type + for the given key has a ``string_list_value``. 
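+
+        A minimal sketch of using these operators (the messages below are the
+        proto-plus types defined in this module; the resulting filter is
+        attached to the ``metadata_filters`` field of the query requests
+        defined in ``retriever_service.py``)::
+
+            from google.ai.generativelanguage_v1beta.types import retriever
+
+            # Match documents whose genre is either "drama" or "action";
+            # conditions for the same key are joined by logical ORs.
+            genre_filter = retriever.MetadataFilter(
+                key="document.custom_metadata.genre",
+                conditions=[
+                    retriever.Condition(
+                        string_value="drama",
+                        operation=retriever.Condition.Operator.EQUAL,
+                    ),
+                    retriever.Condition(
+                        string_value="action",
+                        operation=retriever.Condition.Operator.EQUAL,
+                    ),
+                ],
+            )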
+ """ + OPERATOR_UNSPECIFIED = 0 + LESS = 1 + LESS_EQUAL = 2 + EQUAL = 3 + GREATER_EQUAL = 4 + GREATER = 5 + NOT_EQUAL = 6 + INCLUDES = 7 + EXCLUDES = 8 + + string_value: str = proto.Field( + proto.STRING, + number=1, + oneof="value", + ) + numeric_value: float = proto.Field( + proto.FLOAT, + number=6, + oneof="value", + ) + operation: Operator = proto.Field( + proto.ENUM, + number=5, + enum=Operator, + ) + + +class Chunk(proto.Message): + r"""A ``Chunk`` is a subpart of a ``Document`` that is treated as an + independent unit for the purposes of vector representation and + storage. A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s. + + Attributes: + name (str): + Immutable. Identifier. The ``Chunk`` resource name. The ID + (name excluding the `corpora/*/documents/*/chunks/` prefix) + can contain up to 40 characters that are lowercase + alphanumeric or dashes (-). The ID cannot start or end with + a dash. If the name is empty on create, a random + 12-character unique ID will be generated. Example: + ``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c`` + data (google.ai.generativelanguage_v1beta.types.ChunkData): + Required. The content for the ``Chunk``, such as the text + string. The maximum number of tokens per chunk is 2043. + custom_metadata (MutableSequence[google.ai.generativelanguage_v1beta.types.CustomMetadata]): + Optional. User provided custom metadata stored as key-value + pairs. The maximum number of ``CustomMetadata`` per chunk is + 20. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The Timestamp of when the ``Chunk`` was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The Timestamp of when the ``Chunk`` was last + updated. + state (google.ai.generativelanguage_v1beta.types.Chunk.State): + Output only. Current state of the ``Chunk``. + """ + + class State(proto.Enum): + r"""States for the lifecycle of a ``Chunk``. + + Values: + STATE_UNSPECIFIED (0): + The default value. This value is used if the + state is omitted. + STATE_PENDING_PROCESSING (1): + ``Chunk`` is being processed (embedding and vector storage). + STATE_ACTIVE (2): + ``Chunk`` is processed and available for querying. + STATE_FAILED (10): + ``Chunk`` failed processing. + """ + STATE_UNSPECIFIED = 0 + STATE_PENDING_PROCESSING = 1 + STATE_ACTIVE = 2 + STATE_FAILED = 10 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + data: "ChunkData" = proto.Field( + proto.MESSAGE, + number=2, + message="ChunkData", + ) + custom_metadata: MutableSequence["CustomMetadata"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="CustomMetadata", + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + state: State = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + + +class ChunkData(proto.Message): + r"""Extracted data that represents the ``Chunk`` content. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + string_value (str): + The ``Chunk`` content as a string. The maximum number of + tokens per chunk is 2043. + + This field is a member of `oneof`_ ``data``. 
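+
+    A minimal sketch of how these resource messages fit together (the display
+    names echo the examples in this module's docstrings; the corresponding
+    ``Create*Request`` wrappers live in ``retriever_service.py``)::
+
+        from google.ai.generativelanguage_v1beta.types import retriever
+
+        corpus = retriever.Corpus(display_name="Docs on Semantic Retriever")
+        document = retriever.Document(
+            display_name="Semantic Retriever Documentation",
+            custom_metadata=[
+                retriever.CustomMetadata(key="genre", string_value="documentation"),
+            ],
+        )
+        chunk = retriever.Chunk(
+            data=retriever.ChunkData(string_value="Chunks hold the text that is embedded."),
+        )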
+ """ + + string_value: str = proto.Field( + proto.STRING, + number=1, + oneof="data", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever_service.py new file mode 100644 index 000000000000..f55804da9a59 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/retriever_service.py @@ -0,0 +1,793 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import field_mask_pb2 # type: ignore +import proto # type: ignore + +from google.ai.generativelanguage_v1beta.types import retriever + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "CreateCorpusRequest", + "GetCorpusRequest", + "UpdateCorpusRequest", + "DeleteCorpusRequest", + "ListCorporaRequest", + "ListCorporaResponse", + "QueryCorpusRequest", + "QueryCorpusResponse", + "RelevantChunk", + "CreateDocumentRequest", + "GetDocumentRequest", + "UpdateDocumentRequest", + "DeleteDocumentRequest", + "ListDocumentsRequest", + "ListDocumentsResponse", + "QueryDocumentRequest", + "QueryDocumentResponse", + "CreateChunkRequest", + "BatchCreateChunksRequest", + "BatchCreateChunksResponse", + "GetChunkRequest", + "UpdateChunkRequest", + "BatchUpdateChunksRequest", + "BatchUpdateChunksResponse", + "DeleteChunkRequest", + "BatchDeleteChunksRequest", + "ListChunksRequest", + "ListChunksResponse", + }, +) + + +class CreateCorpusRequest(proto.Message): + r"""Request to create a ``Corpus``. + + Attributes: + corpus (google.ai.generativelanguage_v1beta.types.Corpus): + Required. The ``Corpus`` to create. + """ + + corpus: retriever.Corpus = proto.Field( + proto.MESSAGE, + number=1, + message=retriever.Corpus, + ) + + +class GetCorpusRequest(proto.Message): + r"""Request for getting information about a specific ``Corpus``. + + Attributes: + name (str): + Required. The name of the ``Corpus``. Example: + ``corpora/my-corpus-123`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateCorpusRequest(proto.Message): + r"""Request to update a ``Corpus``. + + Attributes: + corpus (google.ai.generativelanguage_v1beta.types.Corpus): + Required. The ``Corpus`` to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Currently, this only + supports updating ``display_name``. + """ + + corpus: retriever.Corpus = proto.Field( + proto.MESSAGE, + number=1, + message=retriever.Corpus, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteCorpusRequest(proto.Message): + r"""Request to delete a ``Corpus``. + + Attributes: + name (str): + Required. 
The resource name of the ``Corpus``. Example: + ``corpora/my-corpus-123`` + force (bool): + Optional. If set to true, any ``Document``\ s and objects + related to this ``Corpus`` will also be deleted. + + If false (the default), a ``FAILED_PRECONDITION`` error will + be returned if ``Corpus`` contains any ``Document``\ s. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + force: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +class ListCorporaRequest(proto.Message): + r"""Request for listing ``Corpora``. + + Attributes: + page_size (int): + Optional. The maximum number of ``Corpora`` to return (per + page). The service may return fewer ``Corpora``. + + If unspecified, at most 10 ``Corpora`` will be returned. The + maximum size limit is 20 ``Corpora`` per page. + page_token (str): + Optional. A page token, received from a previous + ``ListCorpora`` call. + + Provide the ``next_page_token`` returned in the response as + an argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListCorpora`` must match the call that provided the page + token. + """ + + page_size: int = proto.Field( + proto.INT32, + number=1, + ) + page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ListCorporaResponse(proto.Message): + r"""Response from ``ListCorpora`` containing a paginated list of + ``Corpora``. The results are sorted by ascending + ``corpus.create_time``. + + Attributes: + corpora (MutableSequence[google.ai.generativelanguage_v1beta.types.Corpus]): + The returned corpora. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no more + pages. + """ + + @property + def raw_page(self): + return self + + corpora: MutableSequence[retriever.Corpus] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=retriever.Corpus, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class QueryCorpusRequest(proto.Message): + r"""Request for querying a ``Corpus``. + + Attributes: + name (str): + Required. The name of the ``Corpus`` to query. Example: + ``corpora/my-corpus-123`` + query (str): + Required. Query string to perform semantic + search. + metadata_filters (MutableSequence[google.ai.generativelanguage_v1beta.types.MetadataFilter]): + Optional. Filter for ``Chunk`` and ``Document`` metadata. + Each ``MetadataFilter`` object should correspond to a unique + key. Multiple ``MetadataFilter`` objects are joined by + logical "AND"s. 
+ + Example query at document level: (year >= 2020 OR year < + 2010) AND (genre = drama OR genre = action) + + ``MetadataFilter`` object list: metadata_filters = [ {key = + "document.custom_metadata.year" conditions = [{int_value = + 2020, operation = GREATER_EQUAL}, {int_value = 2010, + operation = LESS}]}, {key = "document.custom_metadata.year" + conditions = [{int_value = 2020, operation = GREATER_EQUAL}, + {int_value = 2010, operation = LESS}]}, {key = + "document.custom_metadata.genre" conditions = [{string_value + = "drama", operation = EQUAL}, {string_value = "action", + operation = EQUAL}]}] + + Example query at chunk level for a numeric range of values: + (year > 2015 AND year <= 2020) + + ``MetadataFilter`` object list: metadata_filters = [ {key = + "chunk.custom_metadata.year" conditions = [{int_value = + 2015, operation = GREATER}]}, {key = + "chunk.custom_metadata.year" conditions = [{int_value = + 2020, operation = LESS_EQUAL}]}] + + Note: "AND"s for the same key are only supported for numeric + values. String values only support "OR"s for the same key. + results_count (int): + Optional. The maximum number of ``Chunk``\ s to return. The + service may return fewer ``Chunk``\ s. + + If unspecified, at most 10 ``Chunk``\ s will be returned. + The maximum specified result count is 100. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=retriever.MetadataFilter, + ) + results_count: int = proto.Field( + proto.INT32, + number=4, + ) + + +class QueryCorpusResponse(proto.Message): + r"""Response from ``QueryCorpus`` containing a list of relevant chunks. + + Attributes: + relevant_chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.RelevantChunk]): + The relevant chunks. + """ + + relevant_chunks: MutableSequence["RelevantChunk"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="RelevantChunk", + ) + + +class RelevantChunk(proto.Message): + r"""The information for a chunk relevant to a query. + + Attributes: + chunk_relevance_score (float): + ``Chunk`` relevance to the query. + chunk (google.ai.generativelanguage_v1beta.types.Chunk): + ``Chunk`` associated with the query. + """ + + chunk_relevance_score: float = proto.Field( + proto.FLOAT, + number=1, + ) + chunk: retriever.Chunk = proto.Field( + proto.MESSAGE, + number=2, + message=retriever.Chunk, + ) + + +class CreateDocumentRequest(proto.Message): + r"""Request to create a ``Document``. + + Attributes: + parent (str): + Required. The name of the ``Corpus`` where this ``Document`` + will be created. Example: ``corpora/my-corpus-123`` + document (google.ai.generativelanguage_v1beta.types.Document): + Required. The ``Document`` to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + document: retriever.Document = proto.Field( + proto.MESSAGE, + number=2, + message=retriever.Document, + ) + + +class GetDocumentRequest(proto.Message): + r"""Request for getting information about a specific ``Document``. + + Attributes: + name (str): + Required. The name of the ``Document`` to retrieve. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateDocumentRequest(proto.Message): + r"""Request to update a ``Document``. + + Attributes: + document (google.ai.generativelanguage_v1beta.types.Document): + Required. 
The ``Document`` to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Currently, this only + supports updating ``display_name`` and ``custom_metadata``. + """ + + document: retriever.Document = proto.Field( + proto.MESSAGE, + number=1, + message=retriever.Document, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteDocumentRequest(proto.Message): + r"""Request to delete a ``Document``. + + Attributes: + name (str): + Required. The resource name of the ``Document`` to delete. + Example: ``corpora/my-corpus-123/documents/the-doc-abc`` + force (bool): + Optional. If set to true, any ``Chunk``\ s and objects + related to this ``Document`` will also be deleted. + + If false (the default), a ``FAILED_PRECONDITION`` error will + be returned if ``Document`` contains any ``Chunk``\ s. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + force: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +class ListDocumentsRequest(proto.Message): + r"""Request for listing ``Document``\ s. + + Attributes: + parent (str): + Required. The name of the ``Corpus`` containing + ``Document``\ s. Example: ``corpora/my-corpus-123`` + page_size (int): + Optional. The maximum number of ``Document``\ s to return + (per page). The service may return fewer ``Document``\ s. + + If unspecified, at most 10 ``Document``\ s will be returned. + The maximum size limit is 20 ``Document``\ s per page. + page_token (str): + Optional. A page token, received from a previous + ``ListDocuments`` call. + + Provide the ``next_page_token`` returned in the response as + an argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListDocuments`` must match the call that provided the page + token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListDocumentsResponse(proto.Message): + r"""Response from ``ListDocuments`` containing a paginated list of + ``Document``\ s. The ``Document``\ s are sorted by ascending + ``document.create_time``. + + Attributes: + documents (MutableSequence[google.ai.generativelanguage_v1beta.types.Document]): + The returned ``Document``\ s. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no more + pages. + """ + + @property + def raw_page(self): + return self + + documents: MutableSequence[retriever.Document] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=retriever.Document, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class QueryDocumentRequest(proto.Message): + r"""Request for querying a ``Document``. + + Attributes: + name (str): + Required. The name of the ``Document`` to query. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + query (str): + Required. Query string to perform semantic + search. + results_count (int): + Optional. The maximum number of ``Chunk``\ s to return. The + service may return fewer ``Chunk``\ s. + + If unspecified, at most 10 ``Chunk``\ s will be returned. + The maximum specified result count is 100. + metadata_filters (MutableSequence[google.ai.generativelanguage_v1beta.types.MetadataFilter]): + Optional. Filter for ``Chunk`` metadata. 
Each + ``MetadataFilter`` object should correspond to a unique key. + Multiple ``MetadataFilter`` objects are joined by logical + "AND"s. + + Note: ``Document``-level filtering is not supported for this + request because a ``Document`` name is already specified. + + Example query: (year >= 2020 OR year < 2010) AND (genre = + drama OR genre = action) + + ``MetadataFilter`` object list: metadata_filters = [ {key = + "chunk.custom_metadata.year" conditions = [{int_value = + 2020, operation = GREATER_EQUAL}, {int_value = 2010, + operation = LESS}}, {key = "chunk.custom_metadata.genre" + conditions = [{string_value = "drama", operation = EQUAL}, + {string_value = "action", operation = EQUAL}}] + + Example query for a numeric range of values: (year > 2015 + AND year <= 2020) + + ``MetadataFilter`` object list: metadata_filters = [ {key = + "chunk.custom_metadata.year" conditions = [{int_value = + 2015, operation = GREATER}]}, {key = + "chunk.custom_metadata.year" conditions = [{int_value = + 2020, operation = LESS_EQUAL}]}] + + Note: "AND"s for the same key are only supported for numeric + values. String values only support "OR"s for the same key. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + results_count: int = proto.Field( + proto.INT32, + number=3, + ) + metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=retriever.MetadataFilter, + ) + + +class QueryDocumentResponse(proto.Message): + r"""Response from ``QueryDocument`` containing a list of relevant + chunks. + + Attributes: + relevant_chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.RelevantChunk]): + The returned relevant chunks. + """ + + relevant_chunks: MutableSequence["RelevantChunk"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="RelevantChunk", + ) + + +class CreateChunkRequest(proto.Message): + r"""Request to create a ``Chunk``. + + Attributes: + parent (str): + Required. The name of the ``Document`` where this ``Chunk`` + will be created. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + chunk (google.ai.generativelanguage_v1beta.types.Chunk): + Required. The ``Chunk`` to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + chunk: retriever.Chunk = proto.Field( + proto.MESSAGE, + number=2, + message=retriever.Chunk, + ) + + +class BatchCreateChunksRequest(proto.Message): + r"""Request to batch create ``Chunk``\ s. + + Attributes: + parent (str): + Optional. The name of the ``Document`` where this batch of + ``Chunk``\ s will be created. The parent field in every + ``CreateChunkRequest`` must match this value. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + requests (MutableSequence[google.ai.generativelanguage_v1beta.types.CreateChunkRequest]): + Required. The request messages specifying the ``Chunk``\ s + to create. A maximum of 100 ``Chunk``\ s can be created in a + batch. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + requests: MutableSequence["CreateChunkRequest"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="CreateChunkRequest", + ) + + +class BatchCreateChunksResponse(proto.Message): + r"""Response from ``BatchCreateChunks`` containing a list of created + ``Chunk``\ s. + + Attributes: + chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.Chunk]): + ``Chunk``\ s created. 
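+
+    A minimal sketch of issuing the batch call (the commented lines assume a
+    retriever service client, named here as ``retriever_client``, generated
+    elsewhere in this package; the resource names are illustrative)::
+
+        from google.ai.generativelanguage_v1beta.types import retriever, retriever_service
+
+        parent = "corpora/my-corpus-123/documents/the-doc-abc"
+        request = retriever_service.BatchCreateChunksRequest(
+            parent=parent,
+            requests=[
+                # Every per-chunk request must use the same parent as the batch.
+                retriever_service.CreateChunkRequest(
+                    parent=parent,
+                    chunk=retriever.Chunk(data=retriever.ChunkData(string_value=text)),
+                )
+                for text in ("First passage.", "Second passage.")
+            ],
+        )
+        # response = retriever_client.batch_create_chunks(request=request)
+        # created_names = [chunk.name for chunk in response.chunks]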
+ """ + + chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=retriever.Chunk, + ) + + +class GetChunkRequest(proto.Message): + r"""Request for getting information about a specific ``Chunk``. + + Attributes: + name (str): + Required. The name of the ``Chunk`` to retrieve. Example: + ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateChunkRequest(proto.Message): + r"""Request to update a ``Chunk``. + + Attributes: + chunk (google.ai.generativelanguage_v1beta.types.Chunk): + Required. The ``Chunk`` to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. Currently, this only + supports updating ``custom_metadata`` and ``data``. + """ + + chunk: retriever.Chunk = proto.Field( + proto.MESSAGE, + number=1, + message=retriever.Chunk, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class BatchUpdateChunksRequest(proto.Message): + r"""Request to batch update ``Chunk``\ s. + + Attributes: + parent (str): + Optional. The name of the ``Document`` containing the + ``Chunk``\ s to update. The parent field in every + ``UpdateChunkRequest`` must match this value. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + requests (MutableSequence[google.ai.generativelanguage_v1beta.types.UpdateChunkRequest]): + Required. The request messages specifying the ``Chunk``\ s + to update. A maximum of 100 ``Chunk``\ s can be updated in a + batch. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + requests: MutableSequence["UpdateChunkRequest"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="UpdateChunkRequest", + ) + + +class BatchUpdateChunksResponse(proto.Message): + r"""Response from ``BatchUpdateChunks`` containing a list of updated + ``Chunk``\ s. + + Attributes: + chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.Chunk]): + ``Chunk``\ s updated. + """ + + chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=retriever.Chunk, + ) + + +class DeleteChunkRequest(proto.Message): + r"""Request to delete a ``Chunk``. + + Attributes: + name (str): + Required. The resource name of the ``Chunk`` to delete. + Example: + ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchDeleteChunksRequest(proto.Message): + r"""Request to batch delete ``Chunk``\ s. + + Attributes: + parent (str): + Optional. The name of the ``Document`` containing the + ``Chunk``\ s to delete. The parent field in every + ``DeleteChunkRequest`` must match this value. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + requests (MutableSequence[google.ai.generativelanguage_v1beta.types.DeleteChunkRequest]): + Required. The request messages specifying the ``Chunk``\ s + to delete. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + requests: MutableSequence["DeleteChunkRequest"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="DeleteChunkRequest", + ) + + +class ListChunksRequest(proto.Message): + r"""Request for listing ``Chunk``\ s. + + Attributes: + parent (str): + Required. The name of the ``Document`` containing + ``Chunk``\ s. Example: + ``corpora/my-corpus-123/documents/the-doc-abc`` + page_size (int): + Optional. 
The maximum number of ``Chunk``\ s to return (per + page). The service may return fewer ``Chunk``\ s. + + If unspecified, at most 10 ``Chunk``\ s will be returned. + The maximum size limit is 100 ``Chunk``\ s per page. + page_token (str): + Optional. A page token, received from a previous + ``ListChunks`` call. + + Provide the ``next_page_token`` returned in the response as + an argument to the next request to retrieve the next page. + + When paginating, all other parameters provided to + ``ListChunks`` must match the call that provided the page + token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListChunksResponse(proto.Message): + r"""Response from ``ListChunks`` containing a paginated list of + ``Chunk``\ s. The ``Chunk``\ s are sorted by ascending + ``chunk.create_time``. + + Attributes: + chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.Chunk]): + The returned ``Chunk``\ s. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no more + pages. + """ + + @property + def raw_page(self): + return self + + chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=retriever.Chunk, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/safety.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/safety.py new file mode 100644 index 000000000000..30ce00d36f97 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/safety.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "HarmCategory", + "ContentFilter", + "SafetyFeedback", + "SafetyRating", + "SafetySetting", + }, +) + + +class HarmCategory(proto.Enum): + r"""The category of a rating. + + These categories cover various kinds of harms that developers + may wish to adjust. + + Values: + HARM_CATEGORY_UNSPECIFIED (0): + Category is unspecified. + HARM_CATEGORY_DEROGATORY (1): + Negative or harmful comments targeting + identity and/or protected attribute. + HARM_CATEGORY_TOXICITY (2): + Content that is rude, disrepspectful, or + profane. + HARM_CATEGORY_VIOLENCE (3): + Describes scenarios depictng violence against + an individual or group, or general descriptions + of gore. + HARM_CATEGORY_SEXUAL (4): + Contains references to sexual acts or other + lewd content. 
+ HARM_CATEGORY_MEDICAL (5): + Promotes unchecked medical advice. + HARM_CATEGORY_DANGEROUS (6): + Dangerous content that promotes, facilitates, + or encourages harmful acts. + HARM_CATEGORY_HARASSMENT (7): + Harasment content. + HARM_CATEGORY_HATE_SPEECH (8): + Hate speech and content. + HARM_CATEGORY_SEXUALLY_EXPLICIT (9): + Sexually explicit content. + HARM_CATEGORY_DANGEROUS_CONTENT (10): + Dangerous content. + """ + HARM_CATEGORY_UNSPECIFIED = 0 + HARM_CATEGORY_DEROGATORY = 1 + HARM_CATEGORY_TOXICITY = 2 + HARM_CATEGORY_VIOLENCE = 3 + HARM_CATEGORY_SEXUAL = 4 + HARM_CATEGORY_MEDICAL = 5 + HARM_CATEGORY_DANGEROUS = 6 + HARM_CATEGORY_HARASSMENT = 7 + HARM_CATEGORY_HATE_SPEECH = 8 + HARM_CATEGORY_SEXUALLY_EXPLICIT = 9 + HARM_CATEGORY_DANGEROUS_CONTENT = 10 + + +class ContentFilter(proto.Message): + r"""Content filtering metadata associated with processing a + single request. + ContentFilter contains a reason and an optional supporting + string. The reason may be unspecified. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + reason (google.ai.generativelanguage_v1beta.types.ContentFilter.BlockedReason): + The reason content was blocked during request + processing. + message (str): + A string that describes the filtering + behavior in more detail. + + This field is a member of `oneof`_ ``_message``. + """ + + class BlockedReason(proto.Enum): + r"""A list of reasons why content may have been blocked. + + Values: + BLOCKED_REASON_UNSPECIFIED (0): + A blocked reason was not specified. + SAFETY (1): + Content was blocked by safety settings. + OTHER (2): + Content was blocked, but the reason is + uncategorized. + """ + BLOCKED_REASON_UNSPECIFIED = 0 + SAFETY = 1 + OTHER = 2 + + reason: BlockedReason = proto.Field( + proto.ENUM, + number=1, + enum=BlockedReason, + ) + message: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class SafetyFeedback(proto.Message): + r"""Safety feedback for an entire request. + + This field is populated if content in the input and/or response + is blocked due to safety settings. SafetyFeedback may not exist + for every HarmCategory. Each SafetyFeedback will return the + safety settings used by the request as well as the lowest + HarmProbability that should be allowed in order to return a + result. + + Attributes: + rating (google.ai.generativelanguage_v1beta.types.SafetyRating): + Safety rating evaluated from content. + setting (google.ai.generativelanguage_v1beta.types.SafetySetting): + Safety settings applied to the request. + """ + + rating: "SafetyRating" = proto.Field( + proto.MESSAGE, + number=1, + message="SafetyRating", + ) + setting: "SafetySetting" = proto.Field( + proto.MESSAGE, + number=2, + message="SafetySetting", + ) + + +class SafetyRating(proto.Message): + r"""Safety rating for a piece of content. + + The safety rating contains the category of harm and the harm + probability level in that category for a piece of content. + Content is classified for safety across a number of harm + categories and the probability of the harm classification is + included here. + + Attributes: + category (google.ai.generativelanguage_v1beta.types.HarmCategory): + Required. The category for this rating. + probability (google.ai.generativelanguage_v1beta.types.SafetyRating.HarmProbability): + Required. The probability of harm for this + content. + blocked (bool): + Was this content blocked because of this + rating? 
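+
+    A minimal sketch of reading a rating returned by the service (the
+    ``rating`` variable is illustrative; ``HarmProbability`` is the nested
+    enum defined below)::
+
+        from google.ai.generativelanguage_v1beta.types import safety
+
+        def is_risky(rating: safety.SafetyRating) -> bool:
+            # Treat anything the service blocked, or rated MEDIUM or higher,
+            # as risky for downstream handling.
+            return rating.blocked or (
+                rating.probability >= safety.SafetyRating.HarmProbability.MEDIUM
+            )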
+ """ + + class HarmProbability(proto.Enum): + r"""The probability that a piece of content is harmful. + + The classification system gives the probability of the content + being unsafe. This does not indicate the severity of harm for a + piece of content. + + Values: + HARM_PROBABILITY_UNSPECIFIED (0): + Probability is unspecified. + NEGLIGIBLE (1): + Content has a negligible chance of being + unsafe. + LOW (2): + Content has a low chance of being unsafe. + MEDIUM (3): + Content has a medium chance of being unsafe. + HIGH (4): + Content has a high chance of being unsafe. + """ + HARM_PROBABILITY_UNSPECIFIED = 0 + NEGLIGIBLE = 1 + LOW = 2 + MEDIUM = 3 + HIGH = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=3, + enum="HarmCategory", + ) + probability: HarmProbability = proto.Field( + proto.ENUM, + number=4, + enum=HarmProbability, + ) + blocked: bool = proto.Field( + proto.BOOL, + number=5, + ) + + +class SafetySetting(proto.Message): + r"""Safety setting, affecting the safety-blocking behavior. + + Passing a safety setting for a category changes the allowed + proability that content is blocked. + + Attributes: + category (google.ai.generativelanguage_v1beta.types.HarmCategory): + Required. The category for this setting. + threshold (google.ai.generativelanguage_v1beta.types.SafetySetting.HarmBlockThreshold): + Required. Controls the probability threshold + at which harm is blocked. + """ + + class HarmBlockThreshold(proto.Enum): + r"""Block at and beyond a specified harm probability. + + Values: + HARM_BLOCK_THRESHOLD_UNSPECIFIED (0): + Threshold is unspecified. + BLOCK_LOW_AND_ABOVE (1): + Content with NEGLIGIBLE will be allowed. + BLOCK_MEDIUM_AND_ABOVE (2): + Content with NEGLIGIBLE and LOW will be + allowed. + BLOCK_ONLY_HIGH (3): + Content with NEGLIGIBLE, LOW, and MEDIUM will + be allowed. + BLOCK_NONE (4): + All content will be allowed. + """ + HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 1 + BLOCK_MEDIUM_AND_ABOVE = 2 + BLOCK_ONLY_HIGH = 3 + BLOCK_NONE = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=3, + enum="HarmCategory", + ) + threshold: HarmBlockThreshold = proto.Field( + proto.ENUM, + number=4, + enum=HarmBlockThreshold, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/text_service.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/text_service.py new file mode 100644 index 000000000000..0b0ba3e53191 --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/text_service.py @@ -0,0 +1,441 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.ai.generativelanguage_v1beta.types import citation, safety + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "GenerateTextRequest", + "GenerateTextResponse", + "TextPrompt", + "TextCompletion", + "EmbedTextRequest", + "EmbedTextResponse", + "BatchEmbedTextRequest", + "BatchEmbedTextResponse", + "Embedding", + "CountTextTokensRequest", + "CountTextTokensResponse", + }, +) + + +class GenerateTextRequest(proto.Message): + r"""Request to generate a text completion response from the + model. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + model (str): + Required. The name of the ``Model`` or ``TunedModel`` to use + for generating the completion. Examples: + models/text-bison-001 tunedModels/sentence-translator-u3b7m + prompt (google.ai.generativelanguage_v1beta.types.TextPrompt): + Required. The free-form input text given to + the model as a prompt. + Given a prompt, the model will generate a + TextCompletion response it predicts as the + completion of the input text. + temperature (float): + Optional. Controls the randomness of the output. Note: The + default value varies by model, see the ``Model.temperature`` + attribute of the ``Model`` returned the ``getModel`` + function. + + Values can range from [0.0,1.0], inclusive. A value closer + to 1.0 will produce responses that are more varied and + creative, while a value closer to 0.0 will typically result + in more straightforward responses from the model. + + This field is a member of `oneof`_ ``_temperature``. + candidate_count (int): + Optional. Number of generated responses to return. + + This value must be between [1, 8], inclusive. If unset, this + will default to 1. + + This field is a member of `oneof`_ ``_candidate_count``. + max_output_tokens (int): + Optional. The maximum number of tokens to include in a + candidate. + + If unset, this will default to output_token_limit specified + in the ``Model`` specification. + + This field is a member of `oneof`_ ``_max_output_tokens``. + top_p (float): + Optional. The maximum cumulative probability of tokens to + consider when sampling. + + The model uses combined Top-k and nucleus sampling. + + Tokens are sorted based on their assigned probabilities so + that only the most likely tokens are considered. Top-k + sampling directly limits the maximum number of tokens to + consider, while Nucleus sampling limits number of tokens + based on the cumulative probability. + + Note: The default value varies by model, see the + ``Model.top_p`` attribute of the ``Model`` returned the + ``getModel`` function. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + Optional. The maximum number of tokens to consider when + sampling. + + The model uses combined Top-k and nucleus sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. Defaults to 40. + + Note: The default value varies by model, see the + ``Model.top_k`` attribute of the ``Model`` returned the + ``getModel`` function. + + This field is a member of `oneof`_ ``_top_k``. + safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]): + Optional. A list of unique ``SafetySetting`` instances for + blocking unsafe content. 
+ + that will be enforced on the ``GenerateTextRequest.prompt`` + and ``GenerateTextResponse.candidates``. There should not be + more than one setting for each ``SafetyCategory`` type. The + API will block any prompts and responses that fail to meet + the thresholds set by these settings. This list overrides + the default settings for each ``SafetyCategory`` specified + in the safety_settings. If there is no ``SafetySetting`` for + a given ``SafetyCategory`` provided in the list, the API + will use the default safety setting for that category. Harm + categories HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY, + HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL, + HARM_CATEGORY_MEDICAL, HARM_CATEGORY_DANGEROUS are supported + in text service. + stop_sequences (MutableSequence[str]): + The set of character sequences (up to 5) that + will stop output generation. If specified, the + API will stop at the first appearance of a stop + sequence. The stop sequence will not be included + as part of the response. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + prompt: "TextPrompt" = proto.Field( + proto.MESSAGE, + number=2, + message="TextPrompt", + ) + temperature: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + candidate_count: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + max_output_tokens: int = proto.Field( + proto.INT32, + number=5, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=6, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=7, + optional=True, + ) + safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=safety.SafetySetting, + ) + stop_sequences: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=9, + ) + + +class GenerateTextResponse(proto.Message): + r"""The response from the model, including candidate completions. + + Attributes: + candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.TextCompletion]): + Candidate responses from the model. + filters (MutableSequence[google.ai.generativelanguage_v1beta.types.ContentFilter]): + A set of content filtering metadata for the prompt and + response text. + + This indicates which ``SafetyCategory``\ (s) blocked a + candidate from this response, the lowest ``HarmProbability`` + that triggered a block, and the HarmThreshold setting for + that category. This indicates the smallest change to the + ``SafetySettings`` that would be necessary to unblock at + least 1 response. + + The blocking is configured by the ``SafetySettings`` in the + request (or the default ``SafetySettings`` of the API). + safety_feedback (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyFeedback]): + Returns any safety feedback related to + content filtering. + """ + + candidates: MutableSequence["TextCompletion"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="TextCompletion", + ) + filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=safety.ContentFilter, + ) + safety_feedback: MutableSequence[safety.SafetyFeedback] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=safety.SafetyFeedback, + ) + + +class TextPrompt(proto.Message): + r"""Text given to the model as a prompt. + + The Model will use this TextPrompt to Generate a text + completion. + + Attributes: + text (str): + Required. The prompt text. 
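+
+    A minimal sketch of a complete request built around this prompt type (the
+    call itself is shown as a comment because it relies on a text service
+    client, assumed here to be generated elsewhere in this package)::
+
+        from google.ai.generativelanguage_v1beta.types import safety, text_service
+
+        request = text_service.GenerateTextRequest(
+            model="models/text-bison-001",
+            prompt=text_service.TextPrompt(text="Write a haiku about the sea."),
+            temperature=0.7,
+            candidate_count=2,
+            stop_sequences=["END"],
+            safety_settings=[
+                safety.SafetySetting(
+                    category=safety.HarmCategory.HARM_CATEGORY_TOXICITY,
+                    threshold=safety.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+                ),
+            ],
+        )
+        # response = client.generate_text(request=request)
+        # best = response.candidates[0].output if response.candidates else ""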
+ """ + + text: str = proto.Field( + proto.STRING, + number=1, + ) + + +class TextCompletion(proto.Message): + r"""Output text returned from a model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + output (str): + Output only. The generated text returned from + the model. + safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]): + Ratings for the safety of a response. + + There is at most one rating per category. + citation_metadata (google.ai.generativelanguage_v1beta.types.CitationMetadata): + Output only. Citation information for model-generated + ``output`` in this ``TextCompletion``. + + This field may be populated with attribution information for + any text included in the ``output``. + + This field is a member of `oneof`_ ``_citation_metadata``. + """ + + output: str = proto.Field( + proto.STRING, + number=1, + ) + safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=safety.SafetyRating, + ) + citation_metadata: citation.CitationMetadata = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message=citation.CitationMetadata, + ) + + +class EmbedTextRequest(proto.Message): + r"""Request to get a text embedding from the model. + + Attributes: + model (str): + Required. The model name to use with the + format model=models/{model}. + text (str): + Optional. The free-form input text that the + model will turn into an embedding. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + text: str = proto.Field( + proto.STRING, + number=2, + ) + + +class EmbedTextResponse(proto.Message): + r"""The response to a EmbedTextRequest. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + embedding (google.ai.generativelanguage_v1beta.types.Embedding): + Output only. The embedding generated from the + input text. + + This field is a member of `oneof`_ ``_embedding``. + """ + + embedding: "Embedding" = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message="Embedding", + ) + + +class BatchEmbedTextRequest(proto.Message): + r"""Batch request to get a text embedding from the model. + + Attributes: + model (str): + Required. The name of the ``Model`` to use for generating + the embedding. Examples: models/embedding-gecko-001 + texts (MutableSequence[str]): + Optional. The free-form input texts that the + model will turn into an embedding. The current + limit is 100 texts, over which an error will be + thrown. + requests (MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedTextRequest]): + Optional. Embed requests for the batch. Only one of + ``texts`` or ``requests`` can be set. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + texts: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + requests: MutableSequence["EmbedTextRequest"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="EmbedTextRequest", + ) + + +class BatchEmbedTextResponse(proto.Message): + r"""The response to a EmbedTextRequest. + + Attributes: + embeddings (MutableSequence[google.ai.generativelanguage_v1beta.types.Embedding]): + Output only. The embeddings generated from + the input text. 
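+
+    A minimal sketch of producing this response (the commented call assumes
+    the same text service client; the model name comes from the
+    ``BatchEmbedTextRequest`` documentation above)::
+
+        from google.ai.generativelanguage_v1beta.types import text_service
+
+        request = text_service.BatchEmbedTextRequest(
+            model="models/embedding-gecko-001",
+            texts=["What is a corpus?", "How are chunks embedded?"],
+        )
+        # response = client.batch_embed_text(request=request)
+        # vectors = [list(embedding.value) for embedding in response.embeddings]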
+ """ + + embeddings: MutableSequence["Embedding"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Embedding", + ) + + +class Embedding(proto.Message): + r"""A list of floats representing the embedding. + + Attributes: + value (MutableSequence[float]): + The embedding values. + """ + + value: MutableSequence[float] = proto.RepeatedField( + proto.FLOAT, + number=1, + ) + + +class CountTextTokensRequest(proto.Message): + r"""Counts the number of tokens in the ``prompt`` sent to a model. + + Models may tokenize text differently, so each model may return a + different ``token_count``. + + Attributes: + model (str): + Required. The model's resource name. This serves as an ID + for the Model to use. + + This name should match a model name returned by the + ``ListModels`` method. + + Format: ``models/{model}`` + prompt (google.ai.generativelanguage_v1beta.types.TextPrompt): + Required. The free-form input text given to + the model as a prompt. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + prompt: "TextPrompt" = proto.Field( + proto.MESSAGE, + number=2, + message="TextPrompt", + ) + + +class CountTextTokensResponse(proto.Message): + r"""A response from ``CountTextTokens``. + + It returns the model's ``token_count`` for the ``prompt``. + + Attributes: + token_count (int): + The number of tokens that the ``model`` tokenizes the + ``prompt`` into. + + Always non-negative. + """ + + token_count: int = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/tuned_model.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/tuned_model.py new file mode 100644 index 000000000000..d57bbc3262cd --- /dev/null +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/types/tuned_model.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.ai.generativelanguage.v1beta", + manifest={ + "TunedModel", + "TunedModelSource", + "TuningTask", + "Hyperparameters", + "Dataset", + "TuningExamples", + "TuningExample", + "TuningSnapshot", + }, +) + + +class TunedModel(proto.Message): + r"""A fine-tuned model created using + ModelService.CreateTunedModel. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tuned_model_source (google.ai.generativelanguage_v1beta.types.TunedModelSource): + Optional. 
TunedModel to use as the starting + point for training the new model. + + This field is a member of `oneof`_ ``source_model``. + base_model (str): + Immutable. The name of the ``Model`` to tune. Example: + ``models/text-bison-001`` + + This field is a member of `oneof`_ ``source_model``. + name (str): + Output only. The tuned model name. A unique name will be + generated on create. Example: ``tunedModels/az2mb0bpw6i`` If + display_name is set on create, the id portion of the name + will be set by concatenating the words of the display_name + with hyphens and adding a random portion for uniqueness. + Example: display_name = "Sentence Translator" name = + "tunedModels/sentence-translator-u3b7m". + display_name (str): + Optional. The name to display for this model + in user interfaces. The display name must be up + to 40 characters including spaces. + description (str): + Optional. A short description of this model. + temperature (float): + Optional. Controls the randomness of the output. + + Values can range over ``[0.0,1.0]``, inclusive. A value + closer to ``1.0`` will produce responses that are more + varied, while a value closer to ``0.0`` will typically + result in less surprising responses from the model. + + This value specifies default to be the one used by the base + model while creating the model. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + Optional. For Nucleus sampling. + + Nucleus sampling considers the smallest set of tokens whose + probability sum is at least ``top_p``. + + This value specifies default to be the one used by the base + model while creating the model. + + This field is a member of `oneof`_ ``_top_p``. + top_k (int): + Optional. For Top-k sampling. + + Top-k sampling considers the set of ``top_k`` most probable + tokens. This value specifies default to be used by the + backend while making the call to the model. + + This value specifies default to be the one used by the base + model while creating the model. + + This field is a member of `oneof`_ ``_top_k``. + state (google.ai.generativelanguage_v1beta.types.TunedModel.State): + Output only. The state of the tuned model. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The timestamp when this model + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The timestamp when this model + was updated. + tuning_task (google.ai.generativelanguage_v1beta.types.TuningTask): + Required. The tuning task that creates the + tuned model. + """ + + class State(proto.Enum): + r"""The state of the tuned model. + + Values: + STATE_UNSPECIFIED (0): + The default value. This value is unused. + CREATING (1): + The model is being created. + ACTIVE (2): + The model is ready to be used. + FAILED (3): + The model failed to be created. 
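+
+        A minimal sketch of assembling a ``TunedModel`` for creation (the
+        create call is shown as a comment because it goes through a model
+        service client assumed to be generated elsewhere in this package)::
+
+            from google.ai.generativelanguage_v1beta.types import tuned_model
+
+            model = tuned_model.TunedModel(
+                base_model="models/text-bison-001",
+                display_name="Sentence Translator",
+                tuning_task=tuned_model.TuningTask(
+                    training_data=tuned_model.Dataset(
+                        examples=tuned_model.TuningExamples(
+                            examples=[
+                                tuned_model.TuningExample(text_input="hello", output="hola"),
+                                tuned_model.TuningExample(text_input="goodbye", output="adios"),
+                            ],
+                        ),
+                    ),
+                    hyperparameters=tuned_model.Hyperparameters(
+                        epoch_count=5,
+                        batch_size=16,
+                        learning_rate=0.001,
+                    ),
+                ),
+            )
+            # operation = model_service_client.create_tuned_model(tuned_model=model)
+            # tuned = operation.result()  # resolves once tuning finishes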
+ """ + STATE_UNSPECIFIED = 0 + CREATING = 1 + ACTIVE = 2 + FAILED = 3 + + tuned_model_source: "TunedModelSource" = proto.Field( + proto.MESSAGE, + number=3, + oneof="source_model", + message="TunedModelSource", + ) + base_model: str = proto.Field( + proto.STRING, + number=4, + oneof="source_model", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=5, + ) + description: str = proto.Field( + proto.STRING, + number=6, + ) + temperature: float = proto.Field( + proto.FLOAT, + number=11, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=12, + optional=True, + ) + top_k: int = proto.Field( + proto.INT32, + number=13, + optional=True, + ) + state: State = proto.Field( + proto.ENUM, + number=7, + enum=State, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + tuning_task: "TuningTask" = proto.Field( + proto.MESSAGE, + number=10, + message="TuningTask", + ) + + +class TunedModelSource(proto.Message): + r"""Tuned model as a source for training a new model. + + Attributes: + tuned_model (str): + Immutable. The name of the ``TunedModel`` to use as the + starting point for training the new model. Example: + ``tunedModels/my-tuned-model`` + base_model (str): + Output only. The name of the base ``Model`` this + ``TunedModel`` was tuned from. Example: + ``models/text-bison-001`` + """ + + tuned_model: str = proto.Field( + proto.STRING, + number=1, + ) + base_model: str = proto.Field( + proto.STRING, + number=2, + ) + + +class TuningTask(proto.Message): + r"""Tuning tasks that create tuned models. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The timestamp when tuning this + model started. + complete_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The timestamp when tuning this + model completed. + snapshots (MutableSequence[google.ai.generativelanguage_v1beta.types.TuningSnapshot]): + Output only. Metrics collected during tuning. + training_data (google.ai.generativelanguage_v1beta.types.Dataset): + Required. Input only. Immutable. The model + training data. + hyperparameters (google.ai.generativelanguage_v1beta.types.Hyperparameters): + Immutable. Hyperparameters controlling the + tuning process. If not provided, default values + will be used. + """ + + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + complete_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + snapshots: MutableSequence["TuningSnapshot"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="TuningSnapshot", + ) + training_data: "Dataset" = proto.Field( + proto.MESSAGE, + number=4, + message="Dataset", + ) + hyperparameters: "Hyperparameters" = proto.Field( + proto.MESSAGE, + number=5, + message="Hyperparameters", + ) + + +class Hyperparameters(proto.Message): + r"""Hyperparameters controlling the tuning process. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + epoch_count (int): + Immutable. The number of training epochs. An + epoch is one pass through the training data. If + not set, a default of 10 will be used. + + This field is a member of `oneof`_ ``_epoch_count``. 
+        batch_size (int):
+            Immutable. The batch size hyperparameter for
+            tuning. If not set, a default of 16 or 64 will
+            be used based on the number of training
+            examples.
+
+            This field is a member of `oneof`_ ``_batch_size``.
+        learning_rate (float):
+            Immutable. The learning rate hyperparameter
+            for tuning. If not set, a default of 0.0002 or
+            0.002 will be calculated based on the number of
+            training examples.
+
+            This field is a member of `oneof`_ ``_learning_rate``.
+    """
+
+    epoch_count: int = proto.Field(
+        proto.INT32,
+        number=14,
+        optional=True,
+    )
+    batch_size: int = proto.Field(
+        proto.INT32,
+        number=15,
+        optional=True,
+    )
+    learning_rate: float = proto.Field(
+        proto.FLOAT,
+        number=16,
+        optional=True,
+    )
+
+
+class Dataset(proto.Message):
+    r"""Dataset for training or validation.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        examples (google.ai.generativelanguage_v1beta.types.TuningExamples):
+            Optional. Inline examples.
+
+            This field is a member of `oneof`_ ``dataset``.
+    """
+
+    examples: "TuningExamples" = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof="dataset",
+        message="TuningExamples",
+    )
+
+
+class TuningExamples(proto.Message):
+    r"""A set of tuning examples. Can be training or validation
+    data.
+
+    Attributes:
+        examples (MutableSequence[google.ai.generativelanguage_v1beta.types.TuningExample]):
+            Required. The examples. Example input can be
+            for text or discuss, but all examples in a set
+            must be of the same type.
+    """
+
+    examples: MutableSequence["TuningExample"] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message="TuningExample",
+    )
+
+
+class TuningExample(proto.Message):
+    r"""A single example for tuning.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        text_input (str):
+            Optional. Text model input.
+
+            This field is a member of `oneof`_ ``model_input``.
+        output (str):
+            Required. The expected model output.
+    """
+
+    text_input: str = proto.Field(
+        proto.STRING,
+        number=1,
+        oneof="model_input",
+    )
+    output: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class TuningSnapshot(proto.Message):
+    r"""Record for a single tuning step.
+
+    Attributes:
+        step (int):
+            Output only. The tuning step.
+        epoch (int):
+            Output only. The epoch this step was part of.
+        mean_loss (float):
+            Output only. The mean loss of the training
+            examples for this step.
+        compute_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. The timestamp when this metric
+            was computed.
+ """ + + step: int = proto.Field( + proto.INT32, + number=1, + ) + epoch: int = proto.Field( + proto.INT32, + number=2, + ) + mean_loss: float = proto.Field( + proto.FLOAT, + number=3, + ) + compute_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py index 288d10b11145..360a0d13ebdd 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.3.5" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py index 288d10b11145..360a0d13ebdd 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.3.5" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_async.py new file mode 100644 index 000000000000..888e16ab2eab --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchEmbedContents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_BatchEmbedContents_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
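# Illustrative sketch (not from the generated sources): the tuning types defined
# above (TunedModel, TuningTask, Dataset, TuningExamples, TuningExample,
# Hyperparameters) compose as nested messages. This assumes the package-level
# re-exports used by the samples also cover these types; the values below are
# placeholders taken from the docstring examples and documented defaults.
from google.ai import generativelanguage_v1beta

tuned_model = generativelanguage_v1beta.TunedModel(
    base_model="models/text-bison-001",
    display_name="Sentence Translator",
    tuning_task=generativelanguage_v1beta.TuningTask(
        training_data=generativelanguage_v1beta.Dataset(
            examples=generativelanguage_v1beta.TuningExamples(
                examples=[
                    generativelanguage_v1beta.TuningExample(
                        text_input="text_input_value",
                        output="output_value",
                    ),
                ],
            ),
        ),
        hyperparameters=generativelanguage_v1beta.Hyperparameters(
            epoch_count=10,
            batch_size=16,
            learning_rate=0.0002,
        ),
    ),
)
print(tuned_model)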
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +async def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = await client.batch_embed_contents(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_BatchEmbedContents_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_sync.py new file mode 100644 index 000000000000..2e395bd54cac --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_batch_embed_contents_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchEmbedContents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_BatchEmbedContents_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = client.batch_embed_contents(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_BatchEmbedContents_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_async.py new file mode 100644 index 000000000000..cc99212bd07f --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_CountTokens_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
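# Illustrative sketch (not from the generated sources): the BatchEmbedContents
# samples above build a single EmbedContentRequest. Assuming ``requests`` is a
# repeated field, as the batch semantics imply, it also accepts a list, which is
# the more typical shape when embedding several inputs; all values are placeholders.
from google.ai import generativelanguage_v1

client = generativelanguage_v1.GenerativeServiceClient()

request = generativelanguage_v1.BatchEmbedContentsRequest(
    model="model_value",
    requests=[
        generativelanguage_v1.EmbedContentRequest(model="model_value"),
        generativelanguage_v1.EmbedContentRequest(model="model_value"),
    ],
)

response = client.batch_embed_contents(request=request)
print(response)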
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +async def sample_count_tokens(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = await client.count_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_CountTokens_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_sync.py new file mode 100644 index 000000000000..0eb80d8eb4ef --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_count_tokens_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_CountTokens_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_count_tokens(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = client.count_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_CountTokens_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_async.py new file mode 100644 index 000000000000..c48444f44753 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EmbedContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_EmbedContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +async def sample_embed_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = await client.embed_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_EmbedContent_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_sync.py new file mode 100644 index 000000000000..a4c8aa05edf2 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_embed_content_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EmbedContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_EmbedContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_embed_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = client.embed_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_EmbedContent_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_async.py new file mode 100644 index 000000000000..2a5f898ba337 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_GenerateContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
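# Illustrative sketch (not from the generated sources): the snippet headers above
# point at the client_options documentation for endpoint overrides. A hypothetical
# override could look like this; the endpoint string is only a placeholder value.
from google.api_core.client_options import ClientOptions
from google.ai import generativelanguage_v1

client = generativelanguage_v1.GenerativeServiceClient(
    client_options=ClientOptions(api_endpoint="generativelanguage.googleapis.com")
)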
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +async def sample_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = await client.generate_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_GenerateContent_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_sync.py new file mode 100644 index 000000000000..b2ab403f2565 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_generate_content_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_GenerateContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = client.generate_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_GenerateContent_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_async.py new file mode 100644 index 000000000000..e17c7b39be8a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_StreamGenerateContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +async def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_StreamGenerateContent_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_sync.py new file mode 100644 index 000000000000..ef1332ba1925 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_generative_service_stream_generate_content_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_GenerativeService_StreamGenerateContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END generativelanguage_v1_generated_GenerativeService_StreamGenerateContent_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_async.py new file mode 100644 index 000000000000..536a666693ea --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_ModelService_GetModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
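# Illustrative sketch (not from the generated sources): the *_async samples above
# define coroutines but do not show how to run them. One way, reusing the same
# placeholder request values as the streaming sample, is asyncio.run():
import asyncio

from google.ai import generativelanguage_v1


async def main():
    client = generativelanguage_v1.GenerativeServiceAsyncClient()
    request = generativelanguage_v1.GenerateContentRequest(model="model_value")
    stream = await client.stream_generate_content(request=request)
    async for response in stream:
        print(response)


asyncio.run(main())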
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +async def sample_get_model(): + # Create a client + client = generativelanguage_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_ModelService_GetModel_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_sync.py new file mode 100644 index 000000000000..f9c1bca11861 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_get_model_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_ModelService_GetModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_get_model(): + # Create a client + client = generativelanguage_v1.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1_generated_ModelService_GetModel_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_async.py new file mode 100644 index 000000000000..daeefd8eb668 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1_generated_ModelService_ListModels_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.ai import generativelanguage_v1


+async def sample_list_models():
+    # Create a client
+    client = generativelanguage_v1.ModelServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = generativelanguage_v1.ListModelsRequest(
+    )
+
+    # Make the request (await the async call to obtain the pager)
+    page_result = await client.list_models(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END generativelanguage_v1_generated_ModelService_ListModels_async]
diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_sync.py
new file mode 100644
index 000000000000..45f55d130e67
--- /dev/null
+++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1_generated_model_service_list_models_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListModels
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-ai-generativelanguage


+# [START generativelanguage_v1_generated_ModelService_ListModels_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1 + + +def sample_list_models(): + # Create a client + client = generativelanguage_v1.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1_generated_ModelService_ListModels_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_async.py new file mode 100644 index 000000000000..05f4ed4b23cd --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountMessageTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_DiscussService_CountMessageTokens_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_count_message_tokens(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.CountMessageTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.count_message_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_DiscussService_CountMessageTokens_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_sync.py new file mode 100644 index 000000000000..1a8a1fa7fccf --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_count_message_tokens_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountMessageTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_DiscussService_CountMessageTokens_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_count_message_tokens(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.CountMessageTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.count_message_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_DiscussService_CountMessageTokens_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_async.py new file mode 100644 index 000000000000..adb768c3fb01 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateMessage +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_DiscussService_GenerateMessage_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_generate_message(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.GenerateMessageRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.generate_message(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_DiscussService_GenerateMessage_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_sync.py new file mode 100644 index 000000000000..eb3418bd36e7 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_discuss_service_generate_message_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateMessage +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_DiscussService_GenerateMessage_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_generate_message(): + # Create a client + client = generativelanguage_v1beta.DiscussServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.MessagePrompt() + prompt.messages.content = "content_value" + + request = generativelanguage_v1beta.GenerateMessageRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.generate_message(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_DiscussService_GenerateMessage_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_async.py new file mode 100644 index 000000000000..857072a8535f --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchEmbedContents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1beta.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = await client.batch_embed_contents(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_sync.py new file mode 100644 index 000000000000..820b7e9b735a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_batch_embed_contents_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchEmbedContents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_batch_embed_contents(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.EmbedContentRequest() + requests.model = "model_value" + + request = generativelanguage_v1beta.BatchEmbedContentsRequest( + model="model_value", + requests=requests, + ) + + # Make the request + response = client.batch_embed_contents(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_async.py new file mode 100644 index 000000000000..f87099aac576 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_CountTokens_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_count_tokens(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = await client.count_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_CountTokens_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_sync.py new file mode 100644 index 000000000000..6ae5aa75fb73 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_count_tokens_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_CountTokens_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_count_tokens(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CountTokensRequest( + model="model_value", + ) + + # Make the request + response = client.count_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_CountTokens_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_async.py new file mode 100644 index 000000000000..391accc15459 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EmbedContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_EmbedContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_embed_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = await client.embed_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_EmbedContent_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_sync.py new file mode 100644 index 000000000000..f0383ccc4461 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_embed_content_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EmbedContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_EmbedContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_embed_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedContentRequest( + model="model_value", + ) + + # Make the request + response = client.embed_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_EmbedContent_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_async.py new file mode 100644 index 000000000000..14dc2e490fb9 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateAnswer +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_generate_answer(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateAnswerRequest( + model="model_value", + answer_style="VERBOSE", + ) + + # Make the request + response = await client.generate_answer(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_sync.py new file mode 100644 index 000000000000..74ea769614af --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_answer_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateAnswer +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_generate_answer(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateAnswerRequest( + model="model_value", + answer_style="VERBOSE", + ) + + # Make the request + response = client.generate_answer(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_async.py new file mode 100644 index 000000000000..9445101240f2 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_GenerateContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = await client.generate_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_GenerateContent_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_sync.py new file mode 100644 index 000000000000..c8e66df83bec --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_generate_content_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_GenerateContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + response = client.generate_content(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_GenerateContent_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_async.py new file mode 100644 index 000000000000..38286cae7948 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_sync.py new file mode 100644 index 000000000000..e37f9a08530a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_generative_service_stream_generate_content_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_stream_generate_content(): + # Create a client + client = generativelanguage_v1beta.GenerativeServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GenerateContentRequest( + model="model_value", + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_async.py new file mode 100644 index 000000000000..2262962029f1 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_CreateTunedModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_create_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.CreateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + operation = client.create_tuned_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_CreateTunedModel_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_sync.py new file mode 100644 index 000000000000..6df1d7f57550 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_create_tuned_model_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_CreateTunedModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_create_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.CreateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + operation = client.create_tuned_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_CreateTunedModel_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_async.py new file mode 100644 index 000000000000..40e6f6769563 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_DeleteTunedModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_delete_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteTunedModelRequest( + name="name_value", + ) + + # Make the request + await client.delete_tuned_model(request=request) + + +# [END generativelanguage_v1beta_generated_ModelService_DeleteTunedModel_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_sync.py new file mode 100644 index 000000000000..295bdc565b42 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_delete_tuned_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_DeleteTunedModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_delete_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteTunedModelRequest( + name="name_value", + ) + + # Make the request + client.delete_tuned_model(request=request) + + +# [END generativelanguage_v1beta_generated_ModelService_DeleteTunedModel_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_async.py new file mode 100644 index 000000000000..0bcb3e02a62d --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_GetModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_get_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_GetModel_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_sync.py new file mode 100644 index 000000000000..5c81a46e8fdc --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_model_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_GetModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_get_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_GetModel_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_async.py new file mode 100644 index 000000000000..450f0cad30d3 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_GetTunedModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_get_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetTunedModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tuned_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_GetTunedModel_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_sync.py new file mode 100644 index 000000000000..67682c9dd23a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_get_tuned_model_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_GetTunedModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_get_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetTunedModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_tuned_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_GetTunedModel_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_async.py new file mode 100644 index 000000000000..caf3332547cb --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_ListModels_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_list_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_ListModels_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_sync.py new file mode 100644 index 000000000000..804db1790e46 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_models_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_ListModels_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_list_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListModelsRequest( + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_ListModels_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_async.py new file mode 100644 index 000000000000..cc7a15078df8 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTunedModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_ListTunedModels_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_list_tuned_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListTunedModelsRequest( + ) + + # Make the request + page_result = client.list_tuned_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_ListTunedModels_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_sync.py new file mode 100644 index 000000000000..c9562f6d5e4f --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_list_tuned_models_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTunedModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_ListTunedModels_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_list_tuned_models(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListTunedModelsRequest( + ) + + # Make the request + page_result = client.list_tuned_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_ListTunedModels_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_async.py new file mode 100644 index 000000000000..7c5f9b7bb810 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_UpdateTunedModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_update_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceAsyncClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.UpdateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + response = await client.update_tuned_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_UpdateTunedModel_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_sync.py new file mode 100644 index 000000000000..c168ff81a340 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_model_service_update_tuned_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTunedModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_ModelService_UpdateTunedModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_update_tuned_model(): + # Create a client + client = generativelanguage_v1beta.ModelServiceClient() + + # Initialize request argument(s) + tuned_model = generativelanguage_v1beta.TunedModel() + tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value" + tuned_model.tuning_task.training_data.examples.examples.output = "output_value" + + request = generativelanguage_v1beta.UpdateTunedModelRequest( + tuned_model=tuned_model, + ) + + # Make the request + response = client.update_tuned_model(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_ModelService_UpdateTunedModel_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_async.py new file mode 100644 index 000000000000..aaa3e5de8e79 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_CreatePermission_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_create_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreatePermissionRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_permission(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_CreatePermission_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_sync.py new file mode 100644 index 000000000000..c39e3ca1a58e --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_create_permission_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_CreatePermission_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_create_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreatePermissionRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_permission(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_CreatePermission_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_async.py new file mode 100644 index 000000000000..392d2bb22836 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_DeletePermission_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_delete_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeletePermissionRequest( + name="name_value", + ) + + # Make the request + await client.delete_permission(request=request) + + +# [END generativelanguage_v1beta_generated_PermissionService_DeletePermission_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_sync.py new file mode 100644 index 000000000000..4ee5fa337b0b --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_delete_permission_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_DeletePermission_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_delete_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeletePermissionRequest( + name="name_value", + ) + + # Make the request + client.delete_permission(request=request) + + +# [END generativelanguage_v1beta_generated_PermissionService_DeletePermission_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_async.py new file mode 100644 index 000000000000..efe727b0d7bf --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_GetPermission_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_get_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetPermissionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_permission(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_GetPermission_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_sync.py new file mode 100644 index 000000000000..7b6a4653b6d1 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_get_permission_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_GetPermission_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_get_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetPermissionRequest( + name="name_value", + ) + + # Make the request + response = client.get_permission(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_GetPermission_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_async.py new file mode 100644 index 000000000000..4e2ee7ef39de --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_ListPermissions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.ai import generativelanguage_v1beta
+
+
+async def sample_list_permissions():
+    # Create a client
+    client = generativelanguage_v1beta.PermissionServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = generativelanguage_v1beta.ListPermissionsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request (await the coroutine to obtain the async pager)
+    page_result = await client.list_permissions(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END generativelanguage_v1beta_generated_PermissionService_ListPermissions_async]
diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_sync.py
new file mode 100644
index 000000000000..5caf2589e792
--- /dev/null
+++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_list_permissions_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListPermissions
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-ai-generativelanguage


+# [START generativelanguage_v1beta_generated_PermissionService_ListPermissions_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_list_permissions(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListPermissionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_permissions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_ListPermissions_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_async.py new file mode 100644 index 000000000000..0f7a3d028beb --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TransferOwnership +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_TransferOwnership_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_transfer_ownership(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.TransferOwnershipRequest( + name="name_value", + email_address="email_address_value", + ) + + # Make the request + response = await client.transfer_ownership(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_TransferOwnership_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_sync.py new file mode 100644 index 000000000000..8fffd9821b81 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_transfer_ownership_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TransferOwnership +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_TransferOwnership_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_transfer_ownership(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.TransferOwnershipRequest( + name="name_value", + email_address="email_address_value", + ) + + # Make the request + response = client.transfer_ownership(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_TransferOwnership_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_async.py new file mode 100644 index 000000000000..a9e926b06b5d --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdatePermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_UpdatePermission_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_update_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdatePermissionRequest( + ) + + # Make the request + response = await client.update_permission(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_UpdatePermission_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_sync.py new file mode 100644 index 000000000000..04e8ad9e9d90 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_permission_service_update_permission_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdatePermission +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_PermissionService_UpdatePermission_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_update_permission(): + # Create a client + client = generativelanguage_v1beta.PermissionServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdatePermissionRequest( + ) + + # Make the request + response = client.update_permission(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_PermissionService_UpdatePermission_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_async.py new file mode 100644 index 000000000000..103d94d9db16 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_BatchCreateChunks_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_batch_create_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.CreateChunkRequest() + requests.parent = "parent_value" + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchCreateChunksRequest( + requests=requests, + ) + + # Make the request + response = await client.batch_create_chunks(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_BatchCreateChunks_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_sync.py new file mode 100644 index 000000000000..d5af383da5d6 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_BatchCreateChunks_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_batch_create_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.CreateChunkRequest() + requests.parent = "parent_value" + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchCreateChunksRequest( + requests=requests, + ) + + # Make the request + response = client.batch_create_chunks(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_BatchCreateChunks_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_async.py new file mode 100644 index 000000000000..8a68d6933cff --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchDeleteChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_BatchDeleteChunks_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_batch_delete_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.DeleteChunkRequest() + requests.name = "name_value" + + request = generativelanguage_v1beta.BatchDeleteChunksRequest( + requests=requests, + ) + + # Make the request + await client.batch_delete_chunks(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_BatchDeleteChunks_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_sync.py new file mode 100644 index 000000000000..df9d3abd31ca --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchDeleteChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_BatchDeleteChunks_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_batch_delete_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.DeleteChunkRequest() + requests.name = "name_value" + + request = generativelanguage_v1beta.BatchDeleteChunksRequest( + requests=requests, + ) + + # Make the request + client.batch_delete_chunks(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_BatchDeleteChunks_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_async.py new file mode 100644 index 000000000000..64595c08737f --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchUpdateChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_BatchUpdateChunks_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_batch_update_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.UpdateChunkRequest() + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchUpdateChunksRequest( + requests=requests, + ) + + # Make the request + response = await client.batch_update_chunks(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_BatchUpdateChunks_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_sync.py new file mode 100644 index 000000000000..a659f9a4b0f8 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchUpdateChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_BatchUpdateChunks_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_batch_update_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + requests = generativelanguage_v1beta.UpdateChunkRequest() + requests.chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.BatchUpdateChunksRequest( + requests=requests, + ) + + # Make the request + response = client.batch_update_chunks(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_BatchUpdateChunks_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_async.py new file mode 100644 index 000000000000..1d058bd2282f --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_CreateChunk_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_create_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.CreateChunkRequest( + parent="parent_value", + chunk=chunk, + ) + + # Make the request + response = await client.create_chunk(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_CreateChunk_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_sync.py new file mode 100644 index 000000000000..19f079d74dc3 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_chunk_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_CreateChunk_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_create_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.CreateChunkRequest( + parent="parent_value", + chunk=chunk, + ) + + # Make the request + response = client.create_chunk(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_CreateChunk_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_async.py new file mode 100644 index 000000000000..c2896649487c --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_CreateCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_create_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateCorpusRequest( + ) + + # Make the request + response = await client.create_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_CreateCorpus_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_sync.py new file mode 100644 index 000000000000..5527d28e3f39 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_corpus_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_CreateCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_create_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateCorpusRequest( + ) + + # Make the request + response = client.create_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_CreateCorpus_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_async.py new file mode 100644 index 000000000000..f90311756f25 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_CreateDocument_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_create_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateDocumentRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_CreateDocument_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_sync.py new file mode 100644 index 000000000000..c47e7d8cd2b8 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_create_document_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_CreateDocument_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_create_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.CreateDocumentRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_CreateDocument_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_async.py new file mode 100644 index 000000000000..05dcc3d8258a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_DeleteChunk_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_delete_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteChunkRequest( + name="name_value", + ) + + # Make the request + await client.delete_chunk(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_DeleteChunk_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_sync.py new file mode 100644 index 000000000000..23877de88d6e --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_chunk_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_DeleteChunk_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_delete_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteChunkRequest( + name="name_value", + ) + + # Make the request + client.delete_chunk(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_DeleteChunk_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_async.py new file mode 100644 index 000000000000..f09bc255c979 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_DeleteCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_delete_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteCorpusRequest( + name="name_value", + ) + + # Make the request + await client.delete_corpus(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_DeleteCorpus_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_sync.py new file mode 100644 index 000000000000..931df87678a8 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_corpus_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_DeleteCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_delete_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteCorpusRequest( + name="name_value", + ) + + # Make the request + client.delete_corpus(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_DeleteCorpus_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_async.py new file mode 100644 index 000000000000..040e1e686eb6 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_DeleteDocument_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_delete_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteDocumentRequest( + name="name_value", + ) + + # Make the request + await client.delete_document(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_DeleteDocument_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_sync.py new file mode 100644 index 000000000000..46c027046aa3 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_delete_document_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_DeleteDocument_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_delete_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.DeleteDocumentRequest( + name="name_value", + ) + + # Make the request + client.delete_document(request=request) + + +# [END generativelanguage_v1beta_generated_RetrieverService_DeleteDocument_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_async.py new file mode 100644 index 000000000000..deb7be6f8ade --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_GetChunk_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_get_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetChunkRequest( + name="name_value", + ) + + # Make the request + response = await client.get_chunk(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_GetChunk_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_sync.py new file mode 100644 index 000000000000..7f6969ccbfe2 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_chunk_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_GetChunk_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_get_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetChunkRequest( + name="name_value", + ) + + # Make the request + response = client.get_chunk(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_GetChunk_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_async.py new file mode 100644 index 000000000000..fe453dd4b86a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_GetCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_get_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetCorpusRequest( + name="name_value", + ) + + # Make the request + response = await client.get_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_GetCorpus_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_sync.py new file mode 100644 index 000000000000..cca86a7eb166 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_corpus_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_GetCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_get_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetCorpusRequest( + name="name_value", + ) + + # Make the request + response = client.get_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_GetCorpus_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_async.py new file mode 100644 index 000000000000..d95ee2bf514e --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_GetDocument_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_get_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetDocumentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_GetDocument_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_sync.py new file mode 100644 index 000000000000..030a8275119f --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_get_document_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_GetDocument_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_get_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.GetDocumentRequest( + name="name_value", + ) + + # Make the request + response = client.get_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_GetDocument_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_async.py new file mode 100644 index 000000000000..5dcfaf1c6e67 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListChunks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_ListChunks_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.ai import generativelanguage_v1beta
+
+
+async def sample_list_chunks():
+    # Create a client
+    client = generativelanguage_v1beta.RetrieverServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = generativelanguage_v1beta.ListChunksRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_chunks(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END generativelanguage_v1beta_generated_RetrieverService_ListChunks_async]
diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_sync.py
new file mode 100644
index 000000000000..3fa75b624f77
--- /dev/null
+++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_chunks_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListChunks
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-ai-generativelanguage
+
+
+# [START generativelanguage_v1beta_generated_RetrieverService_ListChunks_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_list_chunks(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListChunksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_chunks(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_ListChunks_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_async.py new file mode 100644 index 000000000000..ad59fb168c4a --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCorpora +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_ListCorpora_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.ai import generativelanguage_v1beta
+
+
+async def sample_list_corpora():
+    # Create a client
+    client = generativelanguage_v1beta.RetrieverServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = generativelanguage_v1beta.ListCorporaRequest(
+    )
+
+    # Make the request
+    page_result = await client.list_corpora(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END generativelanguage_v1beta_generated_RetrieverService_ListCorpora_async]
diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_sync.py
new file mode 100644
index 000000000000..4bfd3bf558da
--- /dev/null
+++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_corpora_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListCorpora
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-ai-generativelanguage
+
+
+# [START generativelanguage_v1beta_generated_RetrieverService_ListCorpora_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_list_corpora(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListCorporaRequest( + ) + + # Make the request + page_result = client.list_corpora(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_ListCorpora_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_async.py new file mode 100644 index 000000000000..d9502d1da332 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDocuments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_ListDocuments_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.ai import generativelanguage_v1beta
+
+
+async def sample_list_documents():
+    # Create a client
+    client = generativelanguage_v1beta.RetrieverServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = generativelanguage_v1beta.ListDocumentsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_documents(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END generativelanguage_v1beta_generated_RetrieverService_ListDocuments_async]
diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_sync.py
new file mode 100644
index 000000000000..dbfb2cafe0e9
--- /dev/null
+++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_list_documents_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListDocuments
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-ai-generativelanguage
+
+
+# [START generativelanguage_v1beta_generated_RetrieverService_ListDocuments_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_list_documents(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.ListDocumentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_documents(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_ListDocuments_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_async.py new file mode 100644 index 000000000000..c75fcdb56251 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_QueryCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_query_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryCorpusRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = await client.query_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_QueryCorpus_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_sync.py new file mode 100644 index 000000000000..ae3815bca1a0 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_corpus_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_QueryCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_query_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryCorpusRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = client.query_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_QueryCorpus_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_async.py new file mode 100644 index 000000000000..09b1c3c683ce --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_QueryDocument_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_query_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryDocumentRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = await client.query_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_QueryDocument_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_sync.py new file mode 100644 index 000000000000..e5caba8a23ad --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_query_document_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_QueryDocument_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_query_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.QueryDocumentRequest( + name="name_value", + query="query_value", + ) + + # Make the request + response = client.query_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_QueryDocument_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_async.py new file mode 100644 index 000000000000..d3e54b8aadb4 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_UpdateChunk_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_update_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.UpdateChunkRequest( + chunk=chunk, + ) + + # Make the request + response = await client.update_chunk(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_UpdateChunk_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_sync.py new file mode 100644 index 000000000000..47b7566cca7e --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_chunk_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateChunk +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_UpdateChunk_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_update_chunk(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + chunk = generativelanguage_v1beta.Chunk() + chunk.data.string_value = "string_value_value" + + request = generativelanguage_v1beta.UpdateChunkRequest( + chunk=chunk, + ) + + # Make the request + response = client.update_chunk(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_UpdateChunk_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_async.py new file mode 100644 index 000000000000..5c036d4ae8c7 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_UpdateCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_update_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateCorpusRequest( + ) + + # Make the request + response = await client.update_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_UpdateCorpus_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_sync.py new file mode 100644 index 000000000000..a587467ffc75 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_corpus_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_UpdateCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_update_corpus(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateCorpusRequest( + ) + + # Make the request + response = client.update_corpus(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_UpdateCorpus_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_async.py new file mode 100644 index 000000000000..94a1218df8b7 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_UpdateDocument_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_update_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateDocumentRequest( + ) + + # Make the request + response = await client.update_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_UpdateDocument_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_sync.py new file mode 100644 index 000000000000..e5b6912241e1 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_retriever_service_update_document_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDocument +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_RetrieverService_UpdateDocument_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_update_document(): + # Create a client + client = generativelanguage_v1beta.RetrieverServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.UpdateDocumentRequest( + ) + + # Make the request + response = client.update_document(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_RetrieverService_UpdateDocument_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_async.py new file mode 100644 index 000000000000..a8c997c7d26b --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchEmbedText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_BatchEmbedText_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_batch_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.BatchEmbedTextRequest( + model="model_value", + ) + + # Make the request + response = await client.batch_embed_text(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_BatchEmbedText_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_sync.py new file mode 100644 index 000000000000..c63ae3710337 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_batch_embed_text_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchEmbedText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_BatchEmbedText_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_batch_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.BatchEmbedTextRequest( + model="model_value", + ) + + # Make the request + response = client.batch_embed_text(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_BatchEmbedText_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_async.py new file mode 100644 index 000000000000..b803a7f4a96b --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountTextTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_CountTextTokens_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_count_text_tokens(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.CountTextTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.count_text_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_CountTextTokens_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_sync.py new file mode 100644 index 000000000000..4e79ec849d76 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_count_text_tokens_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CountTextTokens +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_CountTextTokens_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_count_text_tokens(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.CountTextTokensRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.count_text_tokens(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_CountTextTokens_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_async.py new file mode 100644 index 000000000000..69b05bce6998 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EmbedText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_EmbedText_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedTextRequest( + model="model_value", + ) + + # Make the request + response = await client.embed_text(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_EmbedText_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_sync.py new file mode 100644 index 000000000000..71b17a5ea390 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_embed_text_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EmbedText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_EmbedText_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_embed_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + request = generativelanguage_v1beta.EmbedTextRequest( + model="model_value", + ) + + # Make the request + response = client.embed_text(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_EmbedText_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_async.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_async.py new file mode 100644 index 000000000000..0e1630d6729d --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_GenerateText_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +async def sample_generate_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceAsyncClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.GenerateTextRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = await client.generate_text(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_GenerateText_async] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_sync.py b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_sync.py new file mode 100644 index 000000000000..4b86847af94c --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/generativelanguage_v1beta_generated_text_service_generate_text_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-ai-generativelanguage + + +# [START generativelanguage_v1beta_generated_TextService_GenerateText_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.ai import generativelanguage_v1beta + + +def sample_generate_text(): + # Create a client + client = generativelanguage_v1beta.TextServiceClient() + + # Initialize request argument(s) + prompt = generativelanguage_v1beta.TextPrompt() + prompt.text = "text_value" + + request = generativelanguage_v1beta.GenerateTextRequest( + model="model_value", + prompt=prompt, + ) + + # Make the request + response = client.generate_text(request=request) + + # Handle the response + print(response) + +# [END generativelanguage_v1beta_generated_TextService_GenerateText_sync] diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json new file mode 100644 index 000000000000..d6c3fe4c5051 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json @@ -0,0 +1,1190 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.ai.generativelanguage.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-ai-generativelanguage", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient.batch_embed_contents", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.BatchEmbedContents", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "BatchEmbedContents" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.BatchEmbedContentsRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "requests", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.EmbedContentRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.BatchEmbedContentsResponse", + "shortName": "batch_embed_contents" + }, + "description": "Sample for BatchEmbedContents", + "file": "generativelanguage_v1_generated_generative_service_batch_embed_contents_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_BatchEmbedContents_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_batch_embed_contents_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient", + "shortName": 
"GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient.batch_embed_contents", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.BatchEmbedContents", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "BatchEmbedContents" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.BatchEmbedContentsRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "requests", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.EmbedContentRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.BatchEmbedContentsResponse", + "shortName": "batch_embed_contents" + }, + "description": "Sample for BatchEmbedContents", + "file": "generativelanguage_v1_generated_generative_service_batch_embed_contents_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_BatchEmbedContents_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_batch_embed_contents_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient.count_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.CountTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "CountTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.CountTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.CountTokensResponse", + "shortName": "count_tokens" + }, + "description": "Sample for CountTokens", + "file": "generativelanguage_v1_generated_generative_service_count_tokens_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_CountTokens_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_count_tokens_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient.count_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.CountTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "CountTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.CountTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.CountTokensResponse", + "shortName": "count_tokens" + }, + "description": "Sample for CountTokens", + "file": "generativelanguage_v1_generated_generative_service_count_tokens_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_CountTokens_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_count_tokens_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient.embed_content", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.EmbedContent", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "EmbedContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.EmbedContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "content", + "type": "google.ai.generativelanguage_v1.types.Content" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.EmbedContentResponse", + "shortName": "embed_content" + }, + "description": "Sample for EmbedContent", + "file": "generativelanguage_v1_generated_generative_service_embed_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_EmbedContent_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + 
"start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_embed_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient.embed_content", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.EmbedContent", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "EmbedContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.EmbedContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "content", + "type": "google.ai.generativelanguage_v1.types.Content" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.EmbedContentResponse", + "shortName": "embed_content" + }, + "description": "Sample for EmbedContent", + "file": "generativelanguage_v1_generated_generative_service_embed_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_EmbedContent_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_embed_content_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient.generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.GenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "GenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.GenerateContentResponse", + "shortName": "generate_content" + }, + "description": "Sample for GenerateContent", + "file": "generativelanguage_v1_generated_generative_service_generate_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_GenerateContent_async", + "segments": [ + { 
+ "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_generate_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient.generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.GenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "GenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.GenerateContentResponse", + "shortName": "generate_content" + }, + "description": "Sample for GenerateContent", + "file": "generativelanguage_v1_generated_generative_service_generate_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_GenerateContent_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_generate_content_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceAsyncClient.stream_generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.StreamGenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.ai.generativelanguage_v1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": 
"Sample for StreamGenerateContent", + "file": "generativelanguage_v1_generated_generative_service_stream_generate_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_StreamGenerateContent_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_stream_generate_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.GenerativeServiceClient.stream_generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService.StreamGenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.ai.generativelanguage_v1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "generativelanguage_v1_generated_generative_service_stream_generate_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_GenerativeService_StreamGenerateContent_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_generative_service_stream_generate_content_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.ModelServiceAsyncClient.get_model", + "method": { + "fullName": "google.ai.generativelanguage.v1.ModelService.GetModel", + "service": { + "fullName": "google.ai.generativelanguage.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "generativelanguage_v1_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_ModelService_GetModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_model_service_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.ModelServiceClient.get_model", + "method": { + "fullName": "google.ai.generativelanguage.v1.ModelService.GetModel", + "service": { + "fullName": "google.ai.generativelanguage.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "generativelanguage_v1_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_ModelService_GetModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_model_service_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1.ModelServiceAsyncClient.list_models", + "method": { + "fullName": "google.ai.generativelanguage.v1.ModelService.ListModels", + "service": { + "fullName": "google.ai.generativelanguage.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.ListModelsRequest" + }, + { + "name": "page_size", + "type": "int" + }, + { + "name": "page_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": 
"list_models" + }, + "description": "Sample for ListModels", + "file": "generativelanguage_v1_generated_model_service_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_ModelService_ListModels_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_model_service_list_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1.ModelServiceClient.list_models", + "method": { + "fullName": "google.ai.generativelanguage.v1.ModelService.ListModels", + "service": { + "fullName": "google.ai.generativelanguage.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1.types.ListModelsRequest" + }, + { + "name": "page_size", + "type": "int" + }, + { + "name": "page_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "generativelanguage_v1_generated_model_service_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1_generated_ModelService_ListModels_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1_generated_model_service_list_models_sync.py" + } + ] +} diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json new file mode 100644 index 000000000000..1755de17b8e8 --- /dev/null +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json @@ -0,0 +1,7440 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.ai.generativelanguage.v1beta", + "version": "v1beta" + } + ], + "language": "PYTHON", + "name": "google-ai-generativelanguage", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceAsyncClient", + "shortName": "DiscussServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceAsyncClient.count_message_tokens", + "method": { + 
"fullName": "google.ai.generativelanguage.v1beta.DiscussService.CountMessageTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService", + "shortName": "DiscussService" + }, + "shortName": "CountMessageTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CountMessageTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.MessagePrompt" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.CountMessageTokensResponse", + "shortName": "count_message_tokens" + }, + "description": "Sample for CountMessageTokens", + "file": "generativelanguage_v1beta_generated_discuss_service_count_message_tokens_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_DiscussService_CountMessageTokens_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_discuss_service_count_message_tokens_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceClient", + "shortName": "DiscussServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceClient.count_message_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService.CountMessageTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService", + "shortName": "DiscussService" + }, + "shortName": "CountMessageTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CountMessageTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.MessagePrompt" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.CountMessageTokensResponse", + "shortName": "count_message_tokens" + }, + "description": "Sample for CountMessageTokens", + "file": "generativelanguage_v1beta_generated_discuss_service_count_message_tokens_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_DiscussService_CountMessageTokens_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_discuss_service_count_message_tokens_sync.py" + }, + { 
+ "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceAsyncClient", + "shortName": "DiscussServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceAsyncClient.generate_message", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService.GenerateMessage", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService", + "shortName": "DiscussService" + }, + "shortName": "GenerateMessage" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateMessageRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.MessagePrompt" + }, + { + "name": "temperature", + "type": "float" + }, + { + "name": "candidate_count", + "type": "int" + }, + { + "name": "top_p", + "type": "float" + }, + { + "name": "top_k", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateMessageResponse", + "shortName": "generate_message" + }, + "description": "Sample for GenerateMessage", + "file": "generativelanguage_v1beta_generated_discuss_service_generate_message_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_DiscussService_GenerateMessage_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_discuss_service_generate_message_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceClient", + "shortName": "DiscussServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.DiscussServiceClient.generate_message", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService.GenerateMessage", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.DiscussService", + "shortName": "DiscussService" + }, + "shortName": "GenerateMessage" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateMessageRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.MessagePrompt" + }, + { + "name": "temperature", + "type": "float" + }, + { + "name": "candidate_count", + "type": "int" + }, + { + "name": "top_p", + "type": "float" + }, + { + "name": "top_k", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateMessageResponse", + "shortName": "generate_message" + }, + "description": "Sample for GenerateMessage", + "file": "generativelanguage_v1beta_generated_discuss_service_generate_message_sync.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_DiscussService_GenerateMessage_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_discuss_service_generate_message_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient.batch_embed_contents", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.BatchEmbedContents", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "BatchEmbedContents" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchEmbedContentsRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "requests", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedContentRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchEmbedContentsResponse", + "shortName": "batch_embed_contents" + }, + "description": "Sample for BatchEmbedContents", + "file": "generativelanguage_v1beta_generated_generative_service_batch_embed_contents_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_batch_embed_contents_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient.batch_embed_contents", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.BatchEmbedContents", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "BatchEmbedContents" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchEmbedContentsRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "requests", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedContentRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" 
+ }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchEmbedContentsResponse", + "shortName": "batch_embed_contents" + }, + "description": "Sample for BatchEmbedContents", + "file": "generativelanguage_v1beta_generated_generative_service_batch_embed_contents_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_BatchEmbedContents_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_batch_embed_contents_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient.count_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.CountTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "CountTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CountTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.CountTokensResponse", + "shortName": "count_tokens" + }, + "description": "Sample for CountTokens", + "file": "generativelanguage_v1beta_generated_generative_service_count_tokens_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_CountTokens_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_count_tokens_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient.count_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.CountTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "CountTokens" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.ai.generativelanguage_v1beta.types.CountTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.CountTokensResponse", + "shortName": "count_tokens" + }, + "description": "Sample for CountTokens", + "file": "generativelanguage_v1beta_generated_generative_service_count_tokens_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_CountTokens_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_count_tokens_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient.embed_content", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.EmbedContent", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "EmbedContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.EmbedContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "content", + "type": "google.ai.generativelanguage_v1beta.types.Content" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.EmbedContentResponse", + "shortName": "embed_content" + }, + "description": "Sample for EmbedContent", + "file": "generativelanguage_v1beta_generated_generative_service_embed_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_EmbedContent_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_embed_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient.embed_content", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.GenerativeService.EmbedContent", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "EmbedContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.EmbedContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "content", + "type": "google.ai.generativelanguage_v1beta.types.Content" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.EmbedContentResponse", + "shortName": "embed_content" + }, + "description": "Sample for EmbedContent", + "file": "generativelanguage_v1beta_generated_generative_service_embed_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_EmbedContent_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_embed_content_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient.generate_answer", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.GenerateAnswer", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "GenerateAnswer" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "safety_settings", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]" + }, + { + "name": "answer_style", + "type": "google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest.AnswerStyle" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateAnswerResponse", + "shortName": "generate_answer" + }, + "description": "Sample for GenerateAnswer", + "file": "generativelanguage_v1beta_generated_generative_service_generate_answer_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + 
"start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_generate_answer_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient.generate_answer", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.GenerateAnswer", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "GenerateAnswer" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "safety_settings", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]" + }, + { + "name": "answer_style", + "type": "google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest.AnswerStyle" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateAnswerResponse", + "shortName": "generate_answer" + }, + "description": "Sample for GenerateAnswer", + "file": "generativelanguage_v1beta_generated_generative_service_generate_answer_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_GenerateAnswer_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_generate_answer_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient.generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.GenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "GenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateContentResponse", + "shortName": "generate_content" + }, + 
"description": "Sample for GenerateContent", + "file": "generativelanguage_v1beta_generated_generative_service_generate_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_GenerateContent_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_generate_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient.generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.GenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "GenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateContentResponse", + "shortName": "generate_content" + }, + "description": "Sample for GenerateContent", + "file": "generativelanguage_v1beta_generated_generative_service_generate_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_GenerateContent_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_generate_content_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient", + "shortName": "GenerativeServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceAsyncClient.stream_generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.StreamGenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": 
"MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.ai.generativelanguage_v1beta.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "generativelanguage_v1beta_generated_generative_service_stream_generate_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_stream_generate_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient", + "shortName": "GenerativeServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.GenerativeServiceClient.stream_generate_content", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService.StreamGenerateContent", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.GenerativeService", + "shortName": "GenerativeService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.ai.generativelanguage_v1beta.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.ai.generativelanguage_v1beta.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "generativelanguage_v1beta_generated_generative_service_stream_generate_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_GenerativeService_StreamGenerateContent_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_generative_service_stream_generate_content_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.create_tuned_model", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.ModelService.CreateTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "CreateTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateTunedModelRequest" + }, + { + "name": "tuned_model", + "type": "google.ai.generativelanguage_v1beta.types.TunedModel" + }, + { + "name": "tuned_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_tuned_model" + }, + "description": "Sample for CreateTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_create_tuned_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_CreateTunedModel_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_create_tuned_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.create_tuned_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.CreateTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "CreateTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateTunedModelRequest" + }, + { + "name": "tuned_model", + "type": "google.ai.generativelanguage_v1beta.types.TunedModel" + }, + { + "name": "tuned_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_tuned_model" + }, + "description": "Sample for CreateTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_create_tuned_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_CreateTunedModel_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_create_tuned_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.delete_tuned_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.DeleteTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteTunedModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_tuned_model" + }, + "description": "Sample for DeleteTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_delete_tuned_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_DeleteTunedModel_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_delete_tuned_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.delete_tuned_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.DeleteTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteTunedModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_tuned_model" + }, + "description": "Sample for DeleteTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_delete_tuned_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_DeleteTunedModel_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_delete_tuned_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": 
"google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.get_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.GetModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "generativelanguage_v1beta_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_GetModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.get_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.GetModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "generativelanguage_v1beta_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_GetModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.get_tuned_model", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.ModelService.GetTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetTunedModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.TunedModel", + "shortName": "get_tuned_model" + }, + "description": "Sample for GetTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_get_tuned_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_GetTunedModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_get_tuned_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.get_tuned_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.GetTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetTunedModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.TunedModel", + "shortName": "get_tuned_model" + }, + "description": "Sample for GetTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_get_tuned_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_GetTunedModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_get_tuned_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.list_models", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.ModelService.ListModels", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListModelsRequest" + }, + { + "name": "page_size", + "type": "int" + }, + { + "name": "page_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "generativelanguage_v1beta_generated_model_service_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_ListModels_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_list_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.list_models", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.ListModels", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListModelsRequest" + }, + { + "name": "page_size", + "type": "int" + }, + { + "name": "page_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "generativelanguage_v1beta_generated_model_service_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_ListModels_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_list_models_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": 
"google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.list_tuned_models", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.ListTunedModels", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListTunedModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListTunedModelsRequest" + }, + { + "name": "page_size", + "type": "int" + }, + { + "name": "page_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.model_service.pagers.ListTunedModelsAsyncPager", + "shortName": "list_tuned_models" + }, + "description": "Sample for ListTunedModels", + "file": "generativelanguage_v1beta_generated_model_service_list_tuned_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_ListTunedModels_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_list_tuned_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.list_tuned_models", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.ListTunedModels", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListTunedModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListTunedModelsRequest" + }, + { + "name": "page_size", + "type": "int" + }, + { + "name": "page_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.model_service.pagers.ListTunedModelsPager", + "shortName": "list_tuned_models" + }, + "description": "Sample for ListTunedModels", + "file": "generativelanguage_v1beta_generated_model_service_list_tuned_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_ListTunedModels_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_list_tuned_models_sync.py" + }, + { + "canonical": true, + 
"clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceAsyncClient.update_tuned_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.UpdateTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateTunedModelRequest" + }, + { + "name": "tuned_model", + "type": "google.ai.generativelanguage_v1beta.types.TunedModel" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.TunedModel", + "shortName": "update_tuned_model" + }, + "description": "Sample for UpdateTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_update_tuned_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_UpdateTunedModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_update_tuned_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.ModelServiceClient.update_tuned_model", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService.UpdateTunedModel", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateTunedModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateTunedModelRequest" + }, + { + "name": "tuned_model", + "type": "google.ai.generativelanguage_v1beta.types.TunedModel" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.TunedModel", + "shortName": "update_tuned_model" + }, + "description": "Sample for UpdateTunedModel", + "file": "generativelanguage_v1beta_generated_model_service_update_tuned_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_ModelService_UpdateTunedModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, 
+ "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_model_service_update_tuned_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient", + "shortName": "PermissionServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient.create_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.CreatePermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "CreatePermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreatePermissionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "permission", + "type": "google.ai.generativelanguage_v1beta.types.Permission" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Permission", + "shortName": "create_permission" + }, + "description": "Sample for CreatePermission", + "file": "generativelanguage_v1beta_generated_permission_service_create_permission_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_CreatePermission_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_create_permission_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient", + "shortName": "PermissionServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient.create_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.CreatePermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "CreatePermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreatePermissionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "permission", + "type": "google.ai.generativelanguage_v1beta.types.Permission" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Permission", + "shortName": "create_permission" + }, + "description": "Sample for CreatePermission", + "file": "generativelanguage_v1beta_generated_permission_service_create_permission_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"generativelanguage_v1beta_generated_PermissionService_CreatePermission_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_create_permission_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient", + "shortName": "PermissionServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient.delete_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.DeletePermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "DeletePermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeletePermissionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_permission" + }, + "description": "Sample for DeletePermission", + "file": "generativelanguage_v1beta_generated_permission_service_delete_permission_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_DeletePermission_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_delete_permission_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient", + "shortName": "PermissionServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient.delete_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.DeletePermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "DeletePermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeletePermissionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_permission" + }, + "description": "Sample for DeletePermission", + "file": "generativelanguage_v1beta_generated_permission_service_delete_permission_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"generativelanguage_v1beta_generated_PermissionService_DeletePermission_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_delete_permission_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient", + "shortName": "PermissionServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient.get_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.GetPermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "GetPermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetPermissionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Permission", + "shortName": "get_permission" + }, + "description": "Sample for GetPermission", + "file": "generativelanguage_v1beta_generated_permission_service_get_permission_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_GetPermission_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_get_permission_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient", + "shortName": "PermissionServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient.get_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.GetPermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "GetPermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetPermissionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Permission", + "shortName": "get_permission" + }, + "description": "Sample for GetPermission", + "file": "generativelanguage_v1beta_generated_permission_service_get_permission_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_GetPermission_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_get_permission_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient", + "shortName": "PermissionServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient.list_permissions", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.ListPermissions", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "ListPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListPermissionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.permission_service.pagers.ListPermissionsAsyncPager", + "shortName": "list_permissions" + }, + "description": "Sample for ListPermissions", + "file": "generativelanguage_v1beta_generated_permission_service_list_permissions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_ListPermissions_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_list_permissions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient", + "shortName": "PermissionServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient.list_permissions", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.ListPermissions", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "ListPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListPermissionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.permission_service.pagers.ListPermissionsPager", + "shortName": "list_permissions" + }, + 
"description": "Sample for ListPermissions", + "file": "generativelanguage_v1beta_generated_permission_service_list_permissions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_ListPermissions_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_list_permissions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient", + "shortName": "PermissionServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient.transfer_ownership", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.TransferOwnership", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "TransferOwnership" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.TransferOwnershipRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.TransferOwnershipResponse", + "shortName": "transfer_ownership" + }, + "description": "Sample for TransferOwnership", + "file": "generativelanguage_v1beta_generated_permission_service_transfer_ownership_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_TransferOwnership_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_transfer_ownership_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient", + "shortName": "PermissionServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient.transfer_ownership", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.TransferOwnership", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "TransferOwnership" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.TransferOwnershipRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.ai.generativelanguage_v1beta.types.TransferOwnershipResponse", + "shortName": "transfer_ownership" + }, + "description": "Sample for TransferOwnership", + "file": "generativelanguage_v1beta_generated_permission_service_transfer_ownership_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_TransferOwnership_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_transfer_ownership_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient", + "shortName": "PermissionServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceAsyncClient.update_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.UpdatePermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "UpdatePermission" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdatePermissionRequest" + }, + { + "name": "permission", + "type": "google.ai.generativelanguage_v1beta.types.Permission" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Permission", + "shortName": "update_permission" + }, + "description": "Sample for UpdatePermission", + "file": "generativelanguage_v1beta_generated_permission_service_update_permission_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_UpdatePermission_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_update_permission_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient", + "shortName": "PermissionServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.PermissionServiceClient.update_permission", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService.UpdatePermission", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.PermissionService", + "shortName": "PermissionService" + }, + "shortName": "UpdatePermission" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.ai.generativelanguage_v1beta.types.UpdatePermissionRequest" + }, + { + "name": "permission", + "type": "google.ai.generativelanguage_v1beta.types.Permission" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Permission", + "shortName": "update_permission" + }, + "description": "Sample for UpdatePermission", + "file": "generativelanguage_v1beta_generated_permission_service_update_permission_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_PermissionService_UpdatePermission_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_permission_service_update_permission_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.batch_create_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.BatchCreateChunks", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "BatchCreateChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchCreateChunksRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchCreateChunksResponse", + "shortName": "batch_create_chunks" + }, + "description": "Sample for BatchCreateChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_BatchCreateChunks_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.batch_create_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.BatchCreateChunks", + 
"service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "BatchCreateChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchCreateChunksRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchCreateChunksResponse", + "shortName": "batch_create_chunks" + }, + "description": "Sample for BatchCreateChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_BatchCreateChunks_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_batch_create_chunks_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.batch_delete_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.BatchDeleteChunks", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "BatchDeleteChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchDeleteChunksRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "batch_delete_chunks" + }, + "description": "Sample for BatchDeleteChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_BatchDeleteChunks_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.batch_delete_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.BatchDeleteChunks", + "service": { + "fullName": 
"google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "BatchDeleteChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchDeleteChunksRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "batch_delete_chunks" + }, + "description": "Sample for BatchDeleteChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_BatchDeleteChunks_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_batch_delete_chunks_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.batch_update_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.BatchUpdateChunks", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "BatchUpdateChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchUpdateChunksRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchUpdateChunksResponse", + "shortName": "batch_update_chunks" + }, + "description": "Sample for BatchUpdateChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_BatchUpdateChunks_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.batch_update_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.BatchUpdateChunks", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + 
"shortName": "RetrieverService" + }, + "shortName": "BatchUpdateChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchUpdateChunksRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchUpdateChunksResponse", + "shortName": "batch_update_chunks" + }, + "description": "Sample for BatchUpdateChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_BatchUpdateChunks_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_batch_update_chunks_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.create_chunk", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.CreateChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "CreateChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateChunkRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "chunk", + "type": "google.ai.generativelanguage_v1beta.types.Chunk" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Chunk", + "shortName": "create_chunk" + }, + "description": "Sample for CreateChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_create_chunk_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_CreateChunk_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_create_chunk_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.create_chunk", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.RetrieverService.CreateChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "CreateChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateChunkRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "chunk", + "type": "google.ai.generativelanguage_v1beta.types.Chunk" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Chunk", + "shortName": "create_chunk" + }, + "description": "Sample for CreateChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_create_chunk_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_CreateChunk_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_create_chunk_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.create_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.CreateCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "CreateCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateCorpusRequest" + }, + { + "name": "corpus", + "type": "google.ai.generativelanguage_v1beta.types.Corpus" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Corpus", + "shortName": "create_corpus" + }, + "description": "Sample for CreateCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_create_corpus_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_CreateCorpus_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_create_corpus_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": 
"RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.create_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.CreateCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "CreateCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateCorpusRequest" + }, + { + "name": "corpus", + "type": "google.ai.generativelanguage_v1beta.types.Corpus" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Corpus", + "shortName": "create_corpus" + }, + "description": "Sample for CreateCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_create_corpus_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_CreateCorpus_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_create_corpus_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.create_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.CreateDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "CreateDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateDocumentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "document", + "type": "google.ai.generativelanguage_v1beta.types.Document" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Document", + "shortName": "create_document" + }, + "description": "Sample for CreateDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_create_document_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_CreateDocument_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"generativelanguage_v1beta_generated_retriever_service_create_document_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.create_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.CreateDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "CreateDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CreateDocumentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "document", + "type": "google.ai.generativelanguage_v1beta.types.Document" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Document", + "shortName": "create_document" + }, + "description": "Sample for CreateDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_create_document_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_CreateDocument_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_create_document_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.delete_chunk", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.DeleteChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "DeleteChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteChunkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_chunk" + }, + "description": "Sample for DeleteChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_delete_chunk_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_DeleteChunk_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + 
], + "title": "generativelanguage_v1beta_generated_retriever_service_delete_chunk_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.delete_chunk", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.DeleteChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "DeleteChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteChunkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_chunk" + }, + "description": "Sample for DeleteChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_delete_chunk_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_DeleteChunk_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_delete_chunk_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.delete_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.DeleteCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "DeleteCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteCorpusRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_corpus" + }, + "description": "Sample for DeleteCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_delete_corpus_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_DeleteCorpus_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_delete_corpus_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.delete_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.DeleteCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "DeleteCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteCorpusRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_corpus" + }, + "description": "Sample for DeleteCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_delete_corpus_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_DeleteCorpus_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_delete_corpus_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.delete_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.DeleteDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "DeleteDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteDocumentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_document" + }, + "description": "Sample for DeleteDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_delete_document_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_DeleteDocument_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_delete_document_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": 
"google.ai.generativelanguage_v1beta.RetrieverServiceClient.delete_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.DeleteDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "DeleteDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.DeleteDocumentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_document" + }, + "description": "Sample for DeleteDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_delete_document_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_DeleteDocument_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_delete_document_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.get_chunk", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.GetChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "GetChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetChunkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Chunk", + "shortName": "get_chunk" + }, + "description": "Sample for GetChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_get_chunk_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_GetChunk_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_get_chunk_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.get_chunk", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.RetrieverService.GetChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "GetChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetChunkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Chunk", + "shortName": "get_chunk" + }, + "description": "Sample for GetChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_get_chunk_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_GetChunk_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_get_chunk_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.get_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.GetCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "GetCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetCorpusRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Corpus", + "shortName": "get_corpus" + }, + "description": "Sample for GetCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_get_corpus_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_GetCorpus_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_get_corpus_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.get_corpus", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.RetrieverService.GetCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "GetCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetCorpusRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Corpus", + "shortName": "get_corpus" + }, + "description": "Sample for GetCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_get_corpus_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_GetCorpus_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_get_corpus_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.get_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.GetDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "GetDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetDocumentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Document", + "shortName": "get_document" + }, + "description": "Sample for GetDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_get_document_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_GetDocument_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_get_document_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.get_document", + "method": { + "fullName": 
"google.ai.generativelanguage.v1beta.RetrieverService.GetDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "GetDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GetDocumentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Document", + "shortName": "get_document" + }, + "description": "Sample for GetDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_get_document_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_GetDocument_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_get_document_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.list_chunks", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.ListChunks", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "ListChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListChunksRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListChunksAsyncPager", + "shortName": "list_chunks" + }, + "description": "Sample for ListChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_list_chunks_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_ListChunks_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_list_chunks_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.list_chunks", + "method": 
{ + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.ListChunks", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "ListChunks" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListChunksRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListChunksPager", + "shortName": "list_chunks" + }, + "description": "Sample for ListChunks", + "file": "generativelanguage_v1beta_generated_retriever_service_list_chunks_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_ListChunks_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_list_chunks_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.list_corpora", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.ListCorpora", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "ListCorpora" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListCorporaRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListCorporaAsyncPager", + "shortName": "list_corpora" + }, + "description": "Sample for ListCorpora", + "file": "generativelanguage_v1beta_generated_retriever_service_list_corpora_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_ListCorpora_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_list_corpora_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.list_corpora", 
+ "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.ListCorpora", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "ListCorpora" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListCorporaRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListCorporaPager", + "shortName": "list_corpora" + }, + "description": "Sample for ListCorpora", + "file": "generativelanguage_v1beta_generated_retriever_service_list_corpora_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_ListCorpora_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_list_corpora_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.list_documents", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.ListDocuments", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "ListDocuments" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListDocumentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListDocumentsAsyncPager", + "shortName": "list_documents" + }, + "description": "Sample for ListDocuments", + "file": "generativelanguage_v1beta_generated_retriever_service_list_documents_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_ListDocuments_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_list_documents_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": 
"google.ai.generativelanguage_v1beta.RetrieverServiceClient.list_documents", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.ListDocuments", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "ListDocuments" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.ListDocumentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.services.retriever_service.pagers.ListDocumentsPager", + "shortName": "list_documents" + }, + "description": "Sample for ListDocuments", + "file": "generativelanguage_v1beta_generated_retriever_service_list_documents_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_ListDocuments_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_list_documents_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.query_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.QueryCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "QueryCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.QueryCorpusRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.QueryCorpusResponse", + "shortName": "query_corpus" + }, + "description": "Sample for QueryCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_query_corpus_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_QueryCorpus_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_query_corpus_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + 
"fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.query_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.QueryCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "QueryCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.QueryCorpusRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.QueryCorpusResponse", + "shortName": "query_corpus" + }, + "description": "Sample for QueryCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_query_corpus_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_QueryCorpus_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_query_corpus_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.query_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.QueryDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "QueryDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.QueryDocumentRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.QueryDocumentResponse", + "shortName": "query_document" + }, + "description": "Sample for QueryDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_query_document_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_QueryDocument_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_query_document_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": 
"google.ai.generativelanguage_v1beta.RetrieverServiceClient.query_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.QueryDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "QueryDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.QueryDocumentRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.QueryDocumentResponse", + "shortName": "query_document" + }, + "description": "Sample for QueryDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_query_document_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_QueryDocument_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_query_document_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.update_chunk", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.UpdateChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "UpdateChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateChunkRequest" + }, + { + "name": "chunk", + "type": "google.ai.generativelanguage_v1beta.types.Chunk" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Chunk", + "shortName": "update_chunk" + }, + "description": "Sample for UpdateChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_update_chunk_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_UpdateChunk_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_update_chunk_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.update_chunk", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.UpdateChunk", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "UpdateChunk" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateChunkRequest" + }, + { + "name": "chunk", + "type": "google.ai.generativelanguage_v1beta.types.Chunk" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Chunk", + "shortName": "update_chunk" + }, + "description": "Sample for UpdateChunk", + "file": "generativelanguage_v1beta_generated_retriever_service_update_chunk_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_UpdateChunk_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_update_chunk_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.update_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.UpdateCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "UpdateCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateCorpusRequest" + }, + { + "name": "corpus", + "type": "google.ai.generativelanguage_v1beta.types.Corpus" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Corpus", + "shortName": "update_corpus" + }, + "description": "Sample for UpdateCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_update_corpus_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_UpdateCorpus_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_update_corpus_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.update_corpus", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.UpdateCorpus", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "UpdateCorpus" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateCorpusRequest" + }, + { + "name": "corpus", + "type": "google.ai.generativelanguage_v1beta.types.Corpus" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Corpus", + "shortName": "update_corpus" + }, + "description": "Sample for UpdateCorpus", + "file": "generativelanguage_v1beta_generated_retriever_service_update_corpus_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_UpdateCorpus_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_update_corpus_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient", + "shortName": "RetrieverServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceAsyncClient.update_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.UpdateDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "UpdateDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateDocumentRequest" + }, + { + "name": "document", + "type": "google.ai.generativelanguage_v1beta.types.Document" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Document", + "shortName": "update_document" + }, + "description": "Sample for UpdateDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_update_document_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_UpdateDocument_async", + "segments": [ + { + 
"end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_update_document_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient", + "shortName": "RetrieverServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.RetrieverServiceClient.update_document", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService.UpdateDocument", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.RetrieverService", + "shortName": "RetrieverService" + }, + "shortName": "UpdateDocument" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.UpdateDocumentRequest" + }, + { + "name": "document", + "type": "google.ai.generativelanguage_v1beta.types.Document" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.Document", + "shortName": "update_document" + }, + "description": "Sample for UpdateDocument", + "file": "generativelanguage_v1beta_generated_retriever_service_update_document_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_RetrieverService_UpdateDocument_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_retriever_service_update_document_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient", + "shortName": "TextServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient.batch_embed_text", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.BatchEmbedText", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "BatchEmbedText" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchEmbedTextRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "texts", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchEmbedTextResponse", + "shortName": "batch_embed_text" + }, + "description": "Sample for BatchEmbedText", + "file": 
"generativelanguage_v1beta_generated_text_service_batch_embed_text_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_BatchEmbedText_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_batch_embed_text_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient", + "shortName": "TextServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient.batch_embed_text", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.BatchEmbedText", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "BatchEmbedText" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.BatchEmbedTextRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "texts", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.BatchEmbedTextResponse", + "shortName": "batch_embed_text" + }, + "description": "Sample for BatchEmbedText", + "file": "generativelanguage_v1beta_generated_text_service_batch_embed_text_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_BatchEmbedText_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_batch_embed_text_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient", + "shortName": "TextServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient.count_text_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.CountTextTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "CountTextTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CountTextTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.TextPrompt" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.ai.generativelanguage_v1beta.types.CountTextTokensResponse", + "shortName": "count_text_tokens" + }, + "description": "Sample for CountTextTokens", + "file": "generativelanguage_v1beta_generated_text_service_count_text_tokens_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_CountTextTokens_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_count_text_tokens_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient", + "shortName": "TextServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient.count_text_tokens", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.CountTextTokens", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "CountTextTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.CountTextTokensRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.TextPrompt" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.CountTextTokensResponse", + "shortName": "count_text_tokens" + }, + "description": "Sample for CountTextTokens", + "file": "generativelanguage_v1beta_generated_text_service_count_text_tokens_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_CountTextTokens_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_count_text_tokens_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient", + "shortName": "TextServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient.embed_text", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.EmbedText", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "EmbedText" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.EmbedTextRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "text", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.EmbedTextResponse", + "shortName": "embed_text" + }, + "description": "Sample for EmbedText", + "file": "generativelanguage_v1beta_generated_text_service_embed_text_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_EmbedText_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_embed_text_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient", + "shortName": "TextServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient.embed_text", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.EmbedText", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "EmbedText" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.EmbedTextRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "text", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.EmbedTextResponse", + "shortName": "embed_text" + }, + "description": "Sample for EmbedText", + "file": "generativelanguage_v1beta_generated_text_service_embed_text_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_EmbedText_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_embed_text_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient", + "shortName": "TextServiceAsyncClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceAsyncClient.generate_text", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.GenerateText", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "GenerateText" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateTextRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.TextPrompt" + }, + { + "name": "temperature", + "type": "float" + }, + 
{ + "name": "candidate_count", + "type": "int" + }, + { + "name": "max_output_tokens", + "type": "int" + }, + { + "name": "top_p", + "type": "float" + }, + { + "name": "top_k", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateTextResponse", + "shortName": "generate_text" + }, + "description": "Sample for GenerateText", + "file": "generativelanguage_v1beta_generated_text_service_generate_text_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_GenerateText_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_generate_text_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient", + "shortName": "TextServiceClient" + }, + "fullName": "google.ai.generativelanguage_v1beta.TextServiceClient.generate_text", + "method": { + "fullName": "google.ai.generativelanguage.v1beta.TextService.GenerateText", + "service": { + "fullName": "google.ai.generativelanguage.v1beta.TextService", + "shortName": "TextService" + }, + "shortName": "GenerateText" + }, + "parameters": [ + { + "name": "request", + "type": "google.ai.generativelanguage_v1beta.types.GenerateTextRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "prompt", + "type": "google.ai.generativelanguage_v1beta.types.TextPrompt" + }, + { + "name": "temperature", + "type": "float" + }, + { + "name": "candidate_count", + "type": "int" + }, + { + "name": "max_output_tokens", + "type": "int" + }, + { + "name": "top_p", + "type": "float" + }, + { + "name": "top_k", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.ai.generativelanguage_v1beta.types.GenerateTextResponse", + "shortName": "generate_text" + }, + "description": "Sample for GenerateText", + "file": "generativelanguage_v1beta_generated_text_service_generate_text_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "generativelanguage_v1beta_generated_TextService_GenerateText_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "generativelanguage_v1beta_generated_text_service_generate_text_sync.py" + } + ] +} diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json 
b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json index 0d2d254a89dd..5b7d0a0509b4 100644 --- a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-ai-generativelanguage", - "version": "0.3.5" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json index 6cd937cf3082..91de9e353f90 100644 --- a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-ai-generativelanguage", - "version": "0.3.5" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1_keywords.py b/packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1_keywords.py new file mode 100644 index 000000000000..e18ab501752d --- /dev/null +++ b/packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1_keywords.py @@ -0,0 +1,182 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
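The snippet metadata entries above index each generated sample file by line-range segments (CLIENT_INITIALIZATION, REQUEST_INITIALIZATION, REQUEST_EXECUTION, RESPONSE_HANDLING). For orientation, here is a minimal sketch of the shape such a generated sample typically takes; this is an assumption about the standard generated layout rather than a copy of any file in this patch, and the model name is a placeholder.

from google.ai import generativelanguage_v1beta


def sample_embed_text():
    # CLIENT_INITIALIZATION: create the service client.
    client = generativelanguage_v1beta.TextServiceClient()

    # REQUEST_INITIALIZATION: build the request with placeholder values.
    request = generativelanguage_v1beta.EmbedTextRequest(
        model="models/embedding-gecko-001",  # placeholder model resource name
    )

    # REQUEST_EXECUTION: call the API.
    response = client.embed_text(request=request)

    # RESPONSE_HANDLING: inspect the result.
    print(response)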
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class generativelanguageCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_embed_contents': ('model', 'requests', ), + 'count_tokens': ('model', 'contents', ), + 'embed_content': ('model', 'content', 'task_type', 'title', ), + 'generate_content': ('model', 'contents', 'safety_settings', 'generation_config', ), + 'get_model': ('name', ), + 'list_models': ('page_size', 'page_token', ), + 'stream_generate_content': ('model', 'contents', 'safety_settings', 'generation_config', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=generativelanguageCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. 
+ with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the generativelanguage client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1beta_keywords.py b/packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1beta_keywords.py new file mode 100644 index 000000000000..a63589d8f655 --- /dev/null +++ b/packages/google-ai-generativelanguage/scripts/fixup_generativelanguage_v1beta_keywords.py @@ -0,0 +1,220 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
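To make the keyword fixup concrete, here is a hypothetical before/after of a call that fixup_generativelanguage_v1_keywords.py would rewrite. This is a sketch only: the model name is a placeholder, and the example assumes application default credentials are configured.

from google.ai import generativelanguage_v1

client = generativelanguage_v1.ModelServiceClient()

# Before the fixup: old flattened calling convention, with 'name' passed
# positionally (no longer accepted by the current generated client).
model = client.get_model("models/some-model", timeout=30.0)

# After running the script: positional arguments are folded into a single
# 'request' dict keyed by the names in METHOD_TO_PARAMS ('get_model': ('name',)),
# while retry/timeout/metadata (CTRL_PARAMS) stay as top-level keyword arguments.
model = client.get_model(request={"name": "models/some-model"}, timeout=30.0)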
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class generativelanguageCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_create_chunks': ('requests', 'parent', ), + 'batch_delete_chunks': ('requests', 'parent', ), + 'batch_embed_contents': ('model', 'requests', ), + 'batch_embed_text': ('model', 'texts', 'requests', ), + 'batch_update_chunks': ('requests', 'parent', ), + 'count_message_tokens': ('model', 'prompt', ), + 'count_text_tokens': ('model', 'prompt', ), + 'count_tokens': ('model', 'contents', ), + 'create_chunk': ('parent', 'chunk', ), + 'create_corpus': ('corpus', ), + 'create_document': ('parent', 'document', ), + 'create_permission': ('parent', 'permission', ), + 'create_tuned_model': ('tuned_model', 'tuned_model_id', ), + 'delete_chunk': ('name', ), + 'delete_corpus': ('name', 'force', ), + 'delete_document': ('name', 'force', ), + 'delete_permission': ('name', ), + 'delete_tuned_model': ('name', ), + 'embed_content': ('model', 'content', 'task_type', 'title', ), + 'embed_text': ('model', 'text', ), + 'generate_answer': ('model', 'contents', 'answer_style', 'inline_passages', 'semantic_retriever', 'safety_settings', 'temperature', ), + 'generate_content': ('model', 'contents', 'tools', 'safety_settings', 'generation_config', ), + 'generate_message': ('model', 'prompt', 'temperature', 'candidate_count', 'top_p', 'top_k', ), + 'generate_text': ('model', 'prompt', 'temperature', 'candidate_count', 'max_output_tokens', 'top_p', 'top_k', 'safety_settings', 'stop_sequences', ), + 'get_chunk': ('name', ), + 'get_corpus': ('name', ), + 'get_document': ('name', ), + 'get_model': ('name', ), + 'get_permission': ('name', ), + 'get_tuned_model': ('name', ), + 'list_chunks': ('parent', 'page_size', 'page_token', ), + 'list_corpora': ('page_size', 'page_token', ), + 'list_documents': ('parent', 'page_size', 'page_token', ), + 'list_models': ('page_size', 'page_token', ), + 'list_permissions': ('parent', 'page_size', 'page_token', ), + 'list_tuned_models': ('page_size', 'page_token', 'filter', ), + 'query_corpus': ('name', 'query', 'metadata_filters', 'results_count', ), + 'query_document': ('name', 'query', 'results_count', 'metadata_filters', ), + 'stream_generate_content': ('model', 'contents', 'tools', 'safety_settings', 'generation_config', ), + 'transfer_ownership': ('name', 'email_address', ), + 'update_chunk': ('chunk', 'update_mask', ), + 'update_corpus': ('corpus', 'update_mask', ), + 'update_document': ('document', 'update_mask', ), + 'update_permission': ('permission', 'update_mask', ), + 'update_tuned_model': ('tuned_model', 'update_mask', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=generativelanguageCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the generativelanguage client library.
+
+The existing sources are NOT overwritten; they are copied to output_dir with the changes applied.
+
+Note: This tool makes a best-effort attempt to convert positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include:
+      A) * or ** expansion in a method call.
+      B) Calls via a function or method alias (including free function calls).
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically).
+
+      These all constitute false negatives. The tool can also produce false
+      positives when an API method shares a name with an unrelated method.
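+
+Example invocation (hypothetical paths):
+    python3 fixup_generativelanguage_v1beta_keywords.py \
+        --input-directory ./my_src --output-directory ./my_src_fixed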
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/__init__.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_generative_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_generative_service.py new file mode 100644 index 000000000000..60c04af4dfa5 --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_generative_service.py @@ -0,0 +1,4739 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import json_format +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1.services.generative_service import ( + GenerativeServiceAsyncClient, + GenerativeServiceClient, + transports, +) +from google.ai.generativelanguage_v1.types import content +from google.ai.generativelanguage_v1.types import content as gag_content +from google.ai.generativelanguage_v1.types import generative_service, safety + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GenerativeServiceClient._get_default_mtls_endpoint(None) is None + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (GenerativeServiceClient, "grpc"), + (GenerativeServiceAsyncClient, "grpc_asyncio"), + (GenerativeServiceClient, "rest"), + ], +) +def test_generative_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + 
else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.GenerativeServiceGrpcTransport, "grpc"), + (transports.GenerativeServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.GenerativeServiceRestTransport, "rest"), + ], +) +def test_generative_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (GenerativeServiceClient, "grpc"), + (GenerativeServiceAsyncClient, "grpc_asyncio"), + (GenerativeServiceClient, "rest"), + ], +) +def test_generative_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_generative_service_client_get_transport_class(): + transport = GenerativeServiceClient.get_transport_class() + available_transports = [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceRestTransport, + ] + assert transport in available_transports + + transport = GenerativeServiceClient.get_transport_class("grpc") + assert transport == transports.GenerativeServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenerativeServiceClient, transports.GenerativeServiceGrpcTransport, "grpc"), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (GenerativeServiceClient, transports.GenerativeServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + GenerativeServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceClient), +) +@mock.patch.object( + GenerativeServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceAsyncClient), +) +def test_generative_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(GenerativeServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GenerativeServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
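+    # (Only the strings "true" and "false" are valid for this variable; any other
+    # value is expected to raise ValueError, as asserted below.)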
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + "true", + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + "false", + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceRestTransport, + "rest", + "true", + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + GenerativeServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceClient), +) +@mock.patch.object( + GenerativeServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_generative_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
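+    # (Expected outcome, asserted below: with the env var set to "true" the client
+    # switches to DEFAULT_MTLS_ENDPOINT and forwards the callback as
+    # client_cert_source_for_mtls; with "false" it stays on DEFAULT_ENDPOINT and
+    # passes no client cert source.)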
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [GenerativeServiceClient, GenerativeServiceAsyncClient] +) +@mock.patch.object( + GenerativeServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceClient), +) +@mock.patch.object( + GenerativeServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceAsyncClient), +) +def test_generative_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
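+    # (An explicitly configured api_endpoint and client_cert_source should be returned
+    # unchanged when client certificates are enabled.)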
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenerativeServiceClient, transports.GenerativeServiceGrpcTransport, "grpc"), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (GenerativeServiceClient, transports.GenerativeServiceRestTransport, "rest"), + ], +) +def test_generative_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceRestTransport, + "rest", + None, + ), + ], +) +def test_generative_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_generative_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1.services.generative_service.transports.GenerativeServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = GenerativeServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_generative_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
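+    # (create_channel is expected to receive the credentials loaded from
+    # "credentials.json", not the application-default credentials returned by
+    # google.auth.default.)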
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_generate_content(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateContentResponse() + response = client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +def test_generate_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + client.generate_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + +@pytest.mark.asyncio +async def test_generate_content_async( + transport: str = "grpc_asyncio", + request_type=generative_service.GenerateContentRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateContentResponse() + ) + response = await client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +@pytest.mark.asyncio +async def test_generate_content_async_from_dict(): + await test_generate_content_async(request_type=dict) + + +def test_generate_content_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + call.return_value = generative_service.GenerateContentResponse() + client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_content_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateContentResponse() + ) + await client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_generate_content_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateContentResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
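+    # (The flattened model/contents arguments should have been packed into the single
+    # request object passed as args[0], whose fields are checked below.)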
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +def test_generate_content_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.asyncio +async def test_generate_content_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateContentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateContentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_generate_content_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_stream_generate_content(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([generative_service.GenerateContentResponse()]) + response = client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. 
+ for message in response: + assert isinstance(message, generative_service.GenerateContentResponse) + + +def test_stream_generate_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + client.stream_generate_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + +@pytest.mark.asyncio +async def test_stream_generate_content_async( + transport: str = "grpc_asyncio", + request_type=generative_service.GenerateContentRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[generative_service.GenerateContentResponse()] + ) + response = await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, generative_service.GenerateContentResponse) + + +@pytest.mark.asyncio +async def test_stream_generate_content_async_from_dict(): + await test_stream_generate_content_async(request_type=dict) + + +def test_stream_generate_content_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = iter([generative_service.GenerateContentResponse()]) + client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_stream_generate_content_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
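+    # (For this RPC the routing header is derived from the request's "model" field,
+    # so the call metadata is expected to contain
+    # "x-goog-request-params: model=model_value".)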
+ request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[generative_service.GenerateContentResponse()] + ) + await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_stream_generate_content_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([generative_service.GenerateContentResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.stream_generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +def test_stream_generate_content_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.stream_generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([generative_service.GenerateContentResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.stream_generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.stream_generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.EmbedContentRequest, + dict, + ], +) +def test_embed_content(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.EmbedContentResponse() + response = client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.EmbedContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.EmbedContentResponse) + + +def test_embed_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + client.embed_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.EmbedContentRequest() + + +@pytest.mark.asyncio +async def test_embed_content_async( + transport: str = "grpc_asyncio", request_type=generative_service.EmbedContentRequest +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.EmbedContentResponse() + ) + response = await client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.EmbedContentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, generative_service.EmbedContentResponse) + + +@pytest.mark.asyncio +async def test_embed_content_async_from_dict(): + await test_embed_content_async(request_type=dict) + + +def test_embed_content_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.EmbedContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + call.return_value = generative_service.EmbedContentResponse() + client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_embed_content_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.EmbedContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.EmbedContentResponse() + ) + await client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_embed_content_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.EmbedContentResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.embed_content( + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].content + mock_val = gag_content.Content(parts=[gag_content.Part(text="text_value")]) + assert arg == mock_val + + +def test_embed_content_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
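+    # (The generated client enforces mutual exclusion: pass either a request object or
+    # the flattened keyword arguments, never both.)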
+ with pytest.raises(ValueError): + client.embed_content( + generative_service.EmbedContentRequest(), + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + +@pytest.mark.asyncio +async def test_embed_content_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.EmbedContentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.EmbedContentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.embed_content( + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].content + mock_val = gag_content.Content(parts=[gag_content.Part(text="text_value")]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_embed_content_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.embed_content( + generative_service.EmbedContentRequest(), + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.BatchEmbedContentsRequest, + dict, + ], +) +def test_batch_embed_contents(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.BatchEmbedContentsResponse() + response = client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.BatchEmbedContentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.BatchEmbedContentsResponse) + + +def test_batch_embed_contents_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + client.batch_embed_contents() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.BatchEmbedContentsRequest() + + +@pytest.mark.asyncio +async def test_batch_embed_contents_async( + transport: str = "grpc_asyncio", + request_type=generative_service.BatchEmbedContentsRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.BatchEmbedContentsResponse() + ) + response = await client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.BatchEmbedContentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.BatchEmbedContentsResponse) + + +@pytest.mark.asyncio +async def test_batch_embed_contents_async_from_dict(): + await test_batch_embed_contents_async(request_type=dict) + + +def test_batch_embed_contents_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.BatchEmbedContentsRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + call.return_value = generative_service.BatchEmbedContentsResponse() + client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_embed_contents_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.BatchEmbedContentsRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.BatchEmbedContentsResponse() + ) + await client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_batch_embed_contents_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.BatchEmbedContentsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_embed_contents( + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].requests + mock_val = [generative_service.EmbedContentRequest(model="model_value")] + assert arg == mock_val + + +def test_batch_embed_contents_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_embed_contents( + generative_service.BatchEmbedContentsRequest(), + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + +@pytest.mark.asyncio +async def test_batch_embed_contents_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.BatchEmbedContentsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.BatchEmbedContentsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_embed_contents( + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].requests + mock_val = [generative_service.EmbedContentRequest(model="model_value")] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_batch_embed_contents_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_embed_contents( + generative_service.BatchEmbedContentsRequest(), + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.CountTokensRequest, + dict, + ], +) +def test_count_tokens(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.CountTokensResponse( + total_tokens=1303, + ) + response = client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.CountTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.CountTokensResponse) + assert response.total_tokens == 1303 + + +def test_count_tokens_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + client.count_tokens() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.CountTokensRequest() + + +@pytest.mark.asyncio +async def test_count_tokens_async( + transport: str = "grpc_asyncio", request_type=generative_service.CountTokensRequest +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.CountTokensResponse( + total_tokens=1303, + ) + ) + response = await client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.CountTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.CountTokensResponse) + assert response.total_tokens == 1303 + + +@pytest.mark.asyncio +async def test_count_tokens_async_from_dict(): + await test_count_tokens_async(request_type=dict) + + +def test_count_tokens_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = generative_service.CountTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + call.return_value = generative_service.CountTokensResponse() + client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_count_tokens_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.CountTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.CountTokensResponse() + ) + await client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_count_tokens_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.CountTokensResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.count_tokens( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +def test_count_tokens_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_tokens( + generative_service.CountTokensRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.asyncio +async def test_count_tokens_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = generative_service.CountTokensResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.CountTokensResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.count_tokens( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_count_tokens_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.count_tokens( + generative_service.CountTokensRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_generate_content_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_content(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, generative_service.GenerateContentResponse) + + +def test_generate_content_rest_required_fields( + request_type=generative_service.GenerateContentRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.generate_content(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_content_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_content._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_content_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_generate_content" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_generate_content" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.GenerateContentRequest.pb( + generative_service.GenerateContentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.GenerateContentResponse.to_json( + generative_service.GenerateContentResponse() + ) + + request = generative_service.GenerateContentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.GenerateContentResponse() + + client.generate_content( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_content_rest_bad_request( + transport: str = "rest", request_type=generative_service.GenerateContentRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_content(request) + + +def test_generate_content_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.generate_content(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{model=models/*}:generateContent" % client.transport._host, args[1] + ) + + +def test_generate_content_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +def test_generate_content_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_stream_generate_content_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.GenerateContentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.stream_generate_content(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +def test_stream_generate_content_rest_required_fields( + request_type=generative_service.GenerateContentRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).stream_generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).stream_generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.stream_generate_content(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_stream_generate_content_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.stream_generate_content._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_stream_generate_content_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_stream_generate_content" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_stream_generate_content" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.GenerateContentRequest.pb( + generative_service.GenerateContentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.GenerateContentResponse.to_json( + generative_service.GenerateContentResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = generative_service.GenerateContentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.GenerateContentResponse() + + client.stream_generate_content( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_stream_generate_content_rest_bad_request( + transport: str = "rest", request_type=generative_service.GenerateContentRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake 
a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.stream_generate_content(request) + + +def test_stream_generate_content_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.stream_generate_content(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{model=models/*}:streamGenerateContent" % client.transport._host, + args[1], + ) + + +def test_stream_generate_content_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.stream_generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +def test_stream_generate_content_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.EmbedContentRequest, + dict, + ], +) +def test_embed_content_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.EmbedContentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.EmbedContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.embed_content(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.EmbedContentResponse) + + +def test_embed_content_rest_required_fields( + request_type=generative_service.EmbedContentRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).embed_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).embed_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.EmbedContentResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.EmbedContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.embed_content(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_embed_content_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.embed_content._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "content", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_embed_content_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_embed_content" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_embed_content" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.EmbedContentRequest.pb( + generative_service.EmbedContentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.EmbedContentResponse.to_json( + generative_service.EmbedContentResponse() + ) + + request = generative_service.EmbedContentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.EmbedContentResponse() + + client.embed_content( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_embed_content_rest_bad_request( + transport: str = "rest", request_type=generative_service.EmbedContentRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.embed_content(request) + + +def test_embed_content_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.EmbedContentResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.EmbedContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.embed_content(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{model=models/*}:embedContent" % client.transport._host, args[1] + ) + + +def test_embed_content_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.embed_content( + generative_service.EmbedContentRequest(), + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + +def test_embed_content_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.BatchEmbedContentsRequest, + dict, + ], +) +def test_batch_embed_contents_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.BatchEmbedContentsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.BatchEmbedContentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_embed_contents(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.BatchEmbedContentsResponse) + + +def test_batch_embed_contents_rest_required_fields( + request_type=generative_service.BatchEmbedContentsRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_embed_contents._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_embed_contents._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.BatchEmbedContentsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.BatchEmbedContentsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.batch_embed_contents(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_embed_contents_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_embed_contents._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "requests", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_embed_contents_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_batch_embed_contents" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_batch_embed_contents" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.BatchEmbedContentsRequest.pb( + generative_service.BatchEmbedContentsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + generative_service.BatchEmbedContentsResponse.to_json( + generative_service.BatchEmbedContentsResponse() + ) + ) + + request = generative_service.BatchEmbedContentsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.BatchEmbedContentsResponse() + + client.batch_embed_contents( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_embed_contents_rest_bad_request( + transport: str = "rest", request_type=generative_service.BatchEmbedContentsRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_embed_contents(request) + + +def test_batch_embed_contents_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.BatchEmbedContentsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.BatchEmbedContentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.batch_embed_contents(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{model=models/*}:batchEmbedContents" % client.transport._host, + args[1], + ) + + +def test_batch_embed_contents_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_embed_contents( + generative_service.BatchEmbedContentsRequest(), + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + +def test_batch_embed_contents_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.CountTokensRequest, + dict, + ], +) +def test_count_tokens_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.CountTokensResponse( + total_tokens=1303, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.CountTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.count_tokens(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.CountTokensResponse) + assert response.total_tokens == 1303 + + +def test_count_tokens_rest_required_fields( + request_type=generative_service.CountTokensRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.CountTokensResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.CountTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.count_tokens(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_count_tokens_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.count_tokens._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_count_tokens_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_count_tokens" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_count_tokens" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.CountTokensRequest.pb( + generative_service.CountTokensRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.CountTokensResponse.to_json( + generative_service.CountTokensResponse() + ) + + request = generative_service.CountTokensRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.CountTokensResponse() + + client.count_tokens( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_count_tokens_rest_bad_request( + transport: str = "rest", request_type=generative_service.CountTokensRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.count_tokens(request) + + +def test_count_tokens_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.CountTokensResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.CountTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.count_tokens(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{model=models/*}:countTokens" % client.transport._host, args[1] + ) + + +def test_count_tokens_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_tokens( + generative_service.CountTokensRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +def test_count_tokens_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GenerativeServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.GenerativeServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + transports.GenerativeServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = GenerativeServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.GenerativeServiceGrpcTransport, + ) + + +def test_generative_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GenerativeServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_generative_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1.services.generative_service.transports.GenerativeServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.GenerativeServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "generate_content", + "stream_generate_content", + "embed_content", + "batch_embed_contents", + "count_tokens", + "get_operation", + "cancel_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_generative_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1.services.generative_service.transports.GenerativeServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GenerativeServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_generative_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1.services.generative_service.transports.GenerativeServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GenerativeServiceTransport() + adc.assert_called_once() + + +def test_generative_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GenerativeServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + transports.GenerativeServiceRestTransport, + ], +) +def test_generative_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.GenerativeServiceGrpcTransport, grpc_helpers), + (transports.GenerativeServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_generative_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_generative_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.GenerativeServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_generative_service_host_no_port(transport_name): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_generative_service_host_with_port(transport_name): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_generative_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = GenerativeServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = GenerativeServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.generate_content._session + session2 = client2.transport.generate_content._session + assert session1 != session2 + session1 = client1.transport.stream_generate_content._session + session2 = client2.transport.stream_generate_content._session + assert session1 != session2 + session1 = client1.transport.embed_content._session + session2 = client2.transport.embed_content._session + assert session1 != session2 + session1 = client1.transport.batch_embed_contents._session + session2 = client2.transport.batch_embed_contents._session + assert session1 != session2 + session1 = client1.transport.count_tokens._session + session2 = client2.transport.count_tokens._session + assert session1 != session2 + + +def test_generative_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+    transport = transports.GenerativeServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_generative_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.GenerativeServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.GenerativeServiceGrpcTransport,
+        transports.GenerativeServiceGrpcAsyncIOTransport,
+    ],
+)
+def test_generative_service_transport_channel_mtls_with_client_cert_source(
+    transport_class,
+):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_path(): + model = "squid" + expected = "models/{model}".format( + model=model, + ) + actual = GenerativeServiceClient.model_path(model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "model": "clam", + } + path = GenerativeServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = GenerativeServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = GenerativeServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = GenerativeServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = GenerativeServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = GenerativeServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = GenerativeServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GenerativeServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = GenerativeServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = GenerativeServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = GenerativeServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = GenerativeServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.GenerativeServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.GenerativeServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = GenerativeServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "tunedModels/sample1/operations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "tunedModels/sample1/operations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "tunedModels/sample1/operations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "tunedModels/sample1/operations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "operations"}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "operations"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_cancel_operation(transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (GenerativeServiceClient, transports.GenerativeServiceGrpcTransport), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_model_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_model_service.py new file mode 100644 index 000000000000..115f2efdd589 --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1/test_model_service.py @@ -0,0 +1,3204 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import json_format +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1.services.model_service import ( + ModelServiceAsyncClient, + ModelServiceClient, + pagers, + transports, +) +from google.ai.generativelanguage_v1.types import model, model_service + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelServiceClient._get_default_mtls_endpoint(None) is None + assert ( + ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + ) + assert ( + ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), + (ModelServiceClient, "rest"), + ], +) +def test_model_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ModelServiceGrpcTransport, "grpc"), + 
(transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.ModelServiceRestTransport, "rest"), + ], +) +def test_model_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), + (ModelServiceClient, "rest"), + ], +) +def test_model_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_model_service_client_get_transport_class(): + transport = ModelServiceClient.get_transport_class() + available_transports = [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceRestTransport, + ] + assert transport in available_transports + + transport = ModelServiceClient.get_transport_class("grpc") + assert transport == transports.ModelServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient) +) +@mock.patch.object( + ModelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelServiceAsyncClient), +) +def test_model_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest", "true"), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest", "false"), + ], +) +@mock.patch.object( + ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient) +) +@mock.patch.object( + ModelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ModelServiceClient, ModelServiceAsyncClient]) +@mock.patch.object( + ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient) +) +@mock.patch.object( + ModelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelServiceAsyncClient), +) +def test_model_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest"), + ], +) +def test_model_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ModelServiceClient, + transports.ModelServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest", None), + ], +) +def test_model_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_model_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1.services.model_service.transports.ModelServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ModelServiceClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ModelServiceClient, + transports.ModelServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_model_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
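+    # The credentials loaded from the file (rather than the ADC credentials) are the
+    # ones that must be handed to grpc_helpers.create_channel below.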
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.GetModelRequest, + dict, + ], +) +def test_get_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name="name_value", + base_model_id="base_model_id_value", + version="version_value", + display_name="display_name_value", + description="description_value", + input_token_limit=1838, + output_token_limit=1967, + supported_generation_methods=["supported_generation_methods_value"], + temperature=0.1198, + top_p=0.546, + top_k=541, + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.base_model_id == "base_model_id_value" + assert response.version == "version_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.input_token_limit == 1838 + assert response.output_token_limit == 1967 + assert response.supported_generation_methods == [ + "supported_generation_methods_value" + ] + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + + +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + +@pytest.mark.asyncio +async def test_get_model_async( + transport: str = "grpc_asyncio", request_type=model_service.GetModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model.Model( + name="name_value", + base_model_id="base_model_id_value", + version="version_value", + display_name="display_name_value", + description="description_value", + input_token_limit=1838, + output_token_limit=1967, + supported_generation_methods=["supported_generation_methods_value"], + temperature=0.1198, + top_p=0.546, + top_k=541, + ) + ) + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.base_model_id == "base_model_id_value" + assert response.version == "version_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.input_token_limit == 1838 + assert response.output_token_limit == 1967 + assert response.supported_generation_methods == [ + "supported_generation_methods_value" + ] + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.GetModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model( + model_service.GetModelRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.ListModelsRequest, + dict, + ], +) +def test_list_models(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + +@pytest.mark.asyncio +async def test_list_models_async( + transport: str = "grpc_asyncio", request_type=model_service.ListModelsRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_models_async_from_dict(): + await test_list_models_async(request_type=dict) + + +def test_list_models_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_models( + page_size=951, + page_token="page_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].page_size + mock_val = 951 + assert arg == mock_val + arg = args[0].page_token + mock_val = "page_token_value" + assert arg == mock_val + + +def test_list_models_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + model_service.ListModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_models_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListModelsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_models( + page_size=951, + page_token="page_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].page_size + mock_val = 951 + assert arg == mock_val + arg = args[0].page_token + mock_val = "page_token_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_models_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_models( + model_service.ListModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +def test_list_models_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) for i in results) + + +def test_list_models_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Set the response to a series of pages. 
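+        # For reference, user code typically walks the pages like:
+        #     for page in client.list_models(request={}).pages:
+        #         ...
+        # where iteration stops once a response carries an empty next_page_token.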
+ call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_models(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_models_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_models( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) for i in responses) + + +@pytest.mark.asyncio +async def test_list_models_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_models(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.GetModelRequest, + dict, + ], +) +def test_get_model_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
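+    # The fake HTTP response carries the Model serialized to JSON, which is what the
+    # REST transport parses back into a protobuf message.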
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model.Model( + name="name_value", + base_model_id="base_model_id_value", + version="version_value", + display_name="display_name_value", + description="description_value", + input_token_limit=1838, + output_token_limit=1967, + supported_generation_methods=["supported_generation_methods_value"], + temperature=0.1198, + top_p=0.546, + top_k=541, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_model(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.base_model_id == "base_model_id_value" + assert response.version == "version_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.input_token_limit == 1838 + assert response.output_token_limit == 1967 + assert response.supported_generation_methods == [ + "supported_generation_methods_value" + ] + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + + +def test_get_model_rest_required_fields(request_type=model_service.GetModelRequest): + transport_class = transports.ModelServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model.Model() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_model(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_model_rest_unset_required_fields(): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_get_model" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_get_model" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.GetModelRequest.pb(model_service.GetModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model.Model.to_json(model.Model()) + + request = model_service.GetModelRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model.Model() + + client.get_model( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_rest_bad_request( + transport: str = "rest", request_type=model_service.GetModelRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model(request) + + +def test_get_model_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model.Model() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=models/*}" % client.transport._host, args[1] + ) + + +def test_get_model_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name="name_value", + ) + + +def test_get_model_rest_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.ListModelsRequest, + dict, + ], +) +def test_list_models_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_models(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_models_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_list_models" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_list_models" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.ListModelsRequest.pb( + model_service.ListModelsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_service.ListModelsResponse.to_json( + model_service.ListModelsResponse() + ) + + request = model_service.ListModelsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_service.ListModelsResponse() + + client.list_models( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_models_rest_bad_request( + transport: str = "rest", request_type=model_service.ListModelsRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_models(request) + + +def test_list_models_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_service.ListModelsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + page_size=951, + page_token="page_token_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_models(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/models" % client.transport._host, args[1]) + + +def test_list_models_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + model_service.ListModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +def test_list_models_rest_pager(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(model_service.ListModelsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list_models(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) for i in results) + + pages = list(client.list_models(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + transports.ModelServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = ModelServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelServiceGrpcTransport, + ) + + +def test_model_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_model_service_base_transport(): + # Instantiate the base transport. 
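+    # The base transport is abstract: every RPC method on it is expected to raise
+    # NotImplementedError until a concrete (gRPC or REST) transport overrides it.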
+ with mock.patch( + "google.ai.generativelanguage_v1.services.model_service.transports.ModelServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "get_model", + "list_models", + "get_operation", + "cancel_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_model_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_model_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport() + adc.assert_called_once() + + +def test_model_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +def test_model_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + transports.ModelServiceRestTransport, + ], +) +def test_model_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelServiceGrpcTransport, grpc_helpers), + (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_model_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport], +) +def test_model_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
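+    # In that case the transport should invoke the cert source callback and build
+    # grpc.ssl_channel_credentials() from the returned certificate/key pair.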
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_model_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.ModelServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_model_service_host_no_port(transport_name): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_model_service_host_with_port(transport_name): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_model_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = ModelServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = ModelServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.get_model._session + session2 = client2.transport.get_model._session + assert session1 != session2 + session1 = client1.transport.list_models._session + session2 = client2.transport.list_models._session + assert session1 != session2 + + +def test_model_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_model_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.ModelServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport], +) +def test_model_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
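+# Passing these deprecated arguments is still supported, but must emit a
+# DeprecationWarning, which the test below asserts via pytest.warns.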
+@pytest.mark.parametrize( + "transport_class", + [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport], +) +def test_model_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_path(): + model = "squid" + expected = "models/{model}".format( + model=model, + ) + actual = ModelServiceClient.model_path(model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "model": "clam", + } + path = ModelServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ModelServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = ModelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = ModelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = ModelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = ModelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = ModelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
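+    # For reference: these classmethod helpers are simple format/parse pairs, e.g.
+    # ModelServiceClient.common_organization_path("mussel") should return
+    # "organizations/mussel", and parse_common_organization_path("organizations/mussel")
+    # should give back {"organization": "mussel"}, which is what the round-trip
+    # assertion below relies on.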
+ actual = ModelServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = ModelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = ModelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = ModelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = ModelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ModelServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ModelServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ModelServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "tunedModels/sample1/operations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "tunedModels/sample1/operations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "tunedModels/sample1/operations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "tunedModels/sample1/operations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "operations"}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "operations"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_cancel_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
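+    # For reference: gapic clients derive this routing header from the request's URI
+    # path fields, so with request.name = "locations" the call metadata should
+    # contain the tuple
+    #     ("x-goog-request-params", "name=locations")
+    # which is exactly what the membership check below asserts.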
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
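+        # For reference: in application code this pattern is simply
+        #     with ModelServiceClient(credentials=creds, transport=transport) as client:
+        #         ...  # use the client
+        # and the transport's close() should run automatically on exit, which is what
+        # the patched close below verifies.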
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/__init__.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_discuss_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_discuss_service.py new file mode 100644 index 000000000000..b9d8d3c8c612 --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_discuss_service.py @@ -0,0 +1,2553 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import json_format +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1beta.services.discuss_service import ( + DiscussServiceAsyncClient, + DiscussServiceClient, + transports, +) +from google.ai.generativelanguage_v1beta.types import citation, discuss_service, safety + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DiscussServiceClient._get_default_mtls_endpoint(None) is None + assert ( + DiscussServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + DiscussServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + DiscussServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + DiscussServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + DiscussServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (DiscussServiceClient, "grpc"), + (DiscussServiceAsyncClient, "grpc_asyncio"), + (DiscussServiceClient, "rest"), + ], +) +def test_discuss_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + 
(transports.DiscussServiceGrpcTransport, "grpc"), + (transports.DiscussServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.DiscussServiceRestTransport, "rest"), + ], +) +def test_discuss_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (DiscussServiceClient, "grpc"), + (DiscussServiceAsyncClient, "grpc_asyncio"), + (DiscussServiceClient, "rest"), + ], +) +def test_discuss_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_discuss_service_client_get_transport_class(): + transport = DiscussServiceClient.get_transport_class() + available_transports = [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceRestTransport, + ] + assert transport in available_transports + + transport = DiscussServiceClient.get_transport_class("grpc") + assert transport == transports.DiscussServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (DiscussServiceClient, transports.DiscussServiceGrpcTransport, "grpc"), + ( + DiscussServiceAsyncClient, + transports.DiscussServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (DiscussServiceClient, transports.DiscussServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + DiscussServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DiscussServiceClient), +) +@mock.patch.object( + DiscussServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DiscussServiceAsyncClient), +) +def test_discuss_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DiscussServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DiscussServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
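+    # For reference: GOOGLE_API_USE_CLIENT_CERTIFICATE is only expected to accept
+    # "true" or "false"; any other value should make client construction fail, e.g.
+    # (illustrative, outside the mocks used here):
+    #     os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "Unsupported"
+    #     DiscussServiceClient()  # expected to raise ValueError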
+    with mock.patch.dict(
+        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+    ):
+        with pytest.raises(ValueError):
+            client = client_class(transport=transport_name)
+
+    # Check the case quota_project_id is provided
+    options = client_options.ClientOptions(quota_project_id="octopus")
+    with mock.patch.object(transport_class, "__init__") as patched:
+        patched.return_value = None
+        client = client_class(client_options=options, transport=transport_name)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id="octopus",
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+            api_audience=None,
+        )
+    # Check the case api_audience is provided
+    options = client_options.ClientOptions(
+        api_audience="https://language.googleapis.com"
+    )
+    with mock.patch.object(transport_class, "__init__") as patched:
+        patched.return_value = None
+        client = client_class(client_options=options, transport=transport_name)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+            api_audience="https://language.googleapis.com",
+        )
+
+
+@pytest.mark.parametrize(
+    "client_class,transport_class,transport_name,use_client_cert_env",
+    [
+        (DiscussServiceClient, transports.DiscussServiceGrpcTransport, "grpc", "true"),
+        (
+            DiscussServiceAsyncClient,
+            transports.DiscussServiceGrpcAsyncIOTransport,
+            "grpc_asyncio",
+            "true",
+        ),
+        (DiscussServiceClient, transports.DiscussServiceGrpcTransport, "grpc", "false"),
+        (
+            DiscussServiceAsyncClient,
+            transports.DiscussServiceGrpcAsyncIOTransport,
+            "grpc_asyncio",
+            "false",
+        ),
+        (DiscussServiceClient, transports.DiscussServiceRestTransport, "rest", "true"),
+        (DiscussServiceClient, transports.DiscussServiceRestTransport, "rest", "false"),
+    ],
+)
+@mock.patch.object(
+    DiscussServiceClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(DiscussServiceClient),
+)
+@mock.patch.object(
+    DiscussServiceAsyncClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(DiscussServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_discuss_service_client_mtls_env_auto(
+    client_class, transport_class, transport_name, use_client_cert_env
+):
+    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+    # Check the case client_cert_source is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [DiscussServiceClient, DiscussServiceAsyncClient] +) +@mock.patch.object( + DiscussServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DiscussServiceClient), +) +@mock.patch.object( + DiscussServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DiscussServiceAsyncClient), +) +def test_discuss_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
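+    # For reference: get_mtls_endpoint_and_cert_source() resolves the effective
+    # endpoint and client-cert callback from the supplied ClientOptions together with
+    # the GOOGLE_API_USE_CLIENT_CERTIFICATE / GOOGLE_API_USE_MTLS_ENDPOINT variables:
+    #     api_endpoint, cert_source = DiscussServiceClient.get_mtls_endpoint_and_cert_source(options)
+    # The cases below walk through each combination of those inputs.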
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (DiscussServiceClient, transports.DiscussServiceGrpcTransport, "grpc"), + ( + DiscussServiceAsyncClient, + transports.DiscussServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (DiscussServiceClient, transports.DiscussServiceRestTransport, "rest"), + ], +) +def test_discuss_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + DiscussServiceClient, + transports.DiscussServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + DiscussServiceAsyncClient, + transports.DiscussServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (DiscussServiceClient, transports.DiscussServiceRestTransport, "rest", None), + ], +) +def test_discuss_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_discuss_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1beta.services.discuss_service.transports.DiscussServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = DiscussServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + DiscussServiceClient, + transports.DiscussServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + DiscussServiceAsyncClient, + transports.DiscussServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_discuss_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
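+    # For reference: with credentials_file set, the client is expected to load the
+    # credentials via google.auth.load_credentials_from_file and hand them to
+    # grpc_helpers.create_channel, which is what the patched calls below assert.
+    # The caller-side setup is just:
+    #     options = client_options.ClientOptions(credentials_file="credentials.json")
+    #     client = DiscussServiceClient(client_options=options, transport="grpc")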
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + discuss_service.GenerateMessageRequest, + dict, + ], +) +def test_generate_message(request_type, transport: str = "grpc"): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = discuss_service.GenerateMessageResponse() + response = client.generate_message(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == discuss_service.GenerateMessageRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, discuss_service.GenerateMessageResponse) + + +def test_generate_message_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + client.generate_message() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == discuss_service.GenerateMessageRequest() + + +@pytest.mark.asyncio +async def test_generate_message_async( + transport: str = "grpc_asyncio", request_type=discuss_service.GenerateMessageRequest +): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + discuss_service.GenerateMessageResponse() + ) + response = await client.generate_message(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == discuss_service.GenerateMessageRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, discuss_service.GenerateMessageResponse) + + +@pytest.mark.asyncio +async def test_generate_message_async_from_dict(): + await test_generate_message_async(request_type=dict) + + +def test_generate_message_field_headers(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = discuss_service.GenerateMessageRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + call.return_value = discuss_service.GenerateMessageResponse() + client.generate_message(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_message_field_headers_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = discuss_service.GenerateMessageRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + discuss_service.GenerateMessageResponse() + ) + await client.generate_message(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_generate_message_flattened(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = discuss_service.GenerateMessageResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_message( + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + temperature=0.1198, + candidate_count=1573, + top_p=0.546, + top_k=541, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
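+        # For reference: the flattened keyword arguments above are packed into a
+        # single request message before reaching the transport, roughly:
+        #     discuss_service.GenerateMessageRequest(
+        #         model="model_value",
+        #         prompt=discuss_service.MessagePrompt(context="context_value"),
+        #         temperature=0.1198,
+        #         candidate_count=1573,
+        #         top_p=0.546,
+        #         top_k=541,
+        #     )
+        # which is why the checks below read each field back off args[0].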
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = discuss_service.MessagePrompt(context="context_value") + assert arg == mock_val + assert math.isclose(args[0].temperature, 0.1198, rel_tol=1e-6) + arg = args[0].candidate_count + mock_val = 1573 + assert arg == mock_val + assert math.isclose(args[0].top_p, 0.546, rel_tol=1e-6) + arg = args[0].top_k + mock_val = 541 + assert arg == mock_val + + +def test_generate_message_flattened_error(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_message( + discuss_service.GenerateMessageRequest(), + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + temperature=0.1198, + candidate_count=1573, + top_p=0.546, + top_k=541, + ) + + +@pytest.mark.asyncio +async def test_generate_message_flattened_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_message), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = discuss_service.GenerateMessageResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + discuss_service.GenerateMessageResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.generate_message( + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + temperature=0.1198, + candidate_count=1573, + top_p=0.546, + top_k=541, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = discuss_service.MessagePrompt(context="context_value") + assert arg == mock_val + assert math.isclose(args[0].temperature, 0.1198, rel_tol=1e-6) + arg = args[0].candidate_count + mock_val = 1573 + assert arg == mock_val + assert math.isclose(args[0].top_p, 0.546, rel_tol=1e-6) + arg = args[0].top_k + mock_val = 541 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_generate_message_flattened_error_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.generate_message( + discuss_service.GenerateMessageRequest(), + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + temperature=0.1198, + candidate_count=1573, + top_p=0.546, + top_k=541, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + discuss_service.CountMessageTokensRequest, + dict, + ], +) +def test_count_message_tokens(request_type, transport: str = "grpc"): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = discuss_service.CountMessageTokensResponse( + token_count=1193, + ) + response = client.count_message_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == discuss_service.CountMessageTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, discuss_service.CountMessageTokensResponse) + assert response.token_count == 1193 + + +def test_count_message_tokens_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + client.count_message_tokens() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == discuss_service.CountMessageTokensRequest() + + +@pytest.mark.asyncio +async def test_count_message_tokens_async( + transport: str = "grpc_asyncio", + request_type=discuss_service.CountMessageTokensRequest, +): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + discuss_service.CountMessageTokensResponse( + token_count=1193, + ) + ) + response = await client.count_message_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == discuss_service.CountMessageTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, discuss_service.CountMessageTokensResponse) + assert response.token_count == 1193 + + +@pytest.mark.asyncio +async def test_count_message_tokens_async_from_dict(): + await test_count_message_tokens_async(request_type=dict) + + +def test_count_message_tokens_field_headers(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = discuss_service.CountMessageTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + call.return_value = discuss_service.CountMessageTokensResponse() + client.count_message_tokens(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_count_message_tokens_field_headers_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = discuss_service.CountMessageTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + discuss_service.CountMessageTokensResponse() + ) + await client.count_message_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_count_message_tokens_flattened(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = discuss_service.CountMessageTokensResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.count_message_tokens( + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = discuss_service.MessagePrompt(context="context_value") + assert arg == mock_val + + +def test_count_message_tokens_flattened_error(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_message_tokens( + discuss_service.CountMessageTokensRequest(), + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + ) + + +@pytest.mark.asyncio +async def test_count_message_tokens_flattened_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_message_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = discuss_service.CountMessageTokensResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + discuss_service.CountMessageTokensResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.count_message_tokens( + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = discuss_service.MessagePrompt(context="context_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_count_message_tokens_flattened_error_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.count_message_tokens( + discuss_service.CountMessageTokensRequest(), + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + discuss_service.GenerateMessageRequest, + dict, + ], +) +def test_generate_message_rest(request_type): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = discuss_service.GenerateMessageResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = discuss_service.GenerateMessageResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_message(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, discuss_service.GenerateMessageResponse) + + +def test_generate_message_rest_required_fields( + request_type=discuss_service.GenerateMessageRequest, +): + transport_class = transports.DiscussServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_message._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_message._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = discuss_service.GenerateMessageResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = discuss_service.GenerateMessageResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.generate_message(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_message_rest_unset_required_fields(): + transport = transports.DiscussServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_message._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "prompt", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_message_rest_interceptors(null_interceptor): + transport = transports.DiscussServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.DiscussServiceRestInterceptor(), + ) + client = DiscussServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.DiscussServiceRestInterceptor, "post_generate_message" + ) as post, mock.patch.object( + transports.DiscussServiceRestInterceptor, "pre_generate_message" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = discuss_service.GenerateMessageRequest.pb( + discuss_service.GenerateMessageRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = discuss_service.GenerateMessageResponse.to_json( + discuss_service.GenerateMessageResponse() + ) + + request = discuss_service.GenerateMessageRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = discuss_service.GenerateMessageResponse() + + client.generate_message( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_message_rest_bad_request( + transport: str = "rest", request_type=discuss_service.GenerateMessageRequest +): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_message(request) + + +def test_generate_message_rest_flattened(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = discuss_service.GenerateMessageResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + temperature=0.1198, + candidate_count=1573, + top_p=0.546, + top_k=541, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = discuss_service.GenerateMessageResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.generate_message(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:generateMessage" % client.transport._host, + args[1], + ) + + +def test_generate_message_rest_flattened_error(transport: str = "rest"): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_message( + discuss_service.GenerateMessageRequest(), + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + temperature=0.1198, + candidate_count=1573, + top_p=0.546, + top_k=541, + ) + + +def test_generate_message_rest_error(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + discuss_service.CountMessageTokensRequest, + dict, + ], +) +def test_count_message_tokens_rest(request_type): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = discuss_service.CountMessageTokensResponse( + token_count=1193, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = discuss_service.CountMessageTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.count_message_tokens(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, discuss_service.CountMessageTokensResponse) + assert response.token_count == 1193 + + +def test_count_message_tokens_rest_required_fields( + request_type=discuss_service.CountMessageTokensRequest, +): + transport_class = transports.DiscussServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_message_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_message_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = discuss_service.CountMessageTokensResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = discuss_service.CountMessageTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.count_message_tokens(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_count_message_tokens_rest_unset_required_fields(): + transport = transports.DiscussServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.count_message_tokens._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "prompt", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_count_message_tokens_rest_interceptors(null_interceptor): + transport = transports.DiscussServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.DiscussServiceRestInterceptor(), + ) + client = DiscussServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.DiscussServiceRestInterceptor, "post_count_message_tokens" + ) as post, mock.patch.object( + transports.DiscussServiceRestInterceptor, "pre_count_message_tokens" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = discuss_service.CountMessageTokensRequest.pb( + discuss_service.CountMessageTokensRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = discuss_service.CountMessageTokensResponse.to_json( + discuss_service.CountMessageTokensResponse() + ) + + request = discuss_service.CountMessageTokensRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = discuss_service.CountMessageTokensResponse() + + client.count_message_tokens( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_count_message_tokens_rest_bad_request( + transport: str = "rest", request_type=discuss_service.CountMessageTokensRequest +): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.count_message_tokens(request) + + +def test_count_message_tokens_rest_flattened(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = discuss_service.CountMessageTokensResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = discuss_service.CountMessageTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.count_message_tokens(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:countMessageTokens" % client.transport._host, + args[1], + ) + + +def test_count_message_tokens_rest_flattened_error(transport: str = "rest"): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_message_tokens( + discuss_service.CountMessageTokensRequest(), + model="model_value", + prompt=discuss_service.MessagePrompt(context="context_value"), + ) + + +def test_count_message_tokens_rest_error(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.DiscussServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DiscussServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DiscussServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.DiscussServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DiscussServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DiscussServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DiscussServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DiscussServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DiscussServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DiscussServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.DiscussServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.DiscussServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceGrpcAsyncIOTransport, + transports.DiscussServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = DiscussServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.DiscussServiceGrpcTransport, + ) + + +def test_discuss_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DiscussServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_discuss_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1beta.services.discuss_service.transports.DiscussServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.DiscussServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "generate_message", + "count_message_tokens", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_discuss_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1beta.services.discuss_service.transports.DiscussServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DiscussServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_discuss_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1beta.services.discuss_service.transports.DiscussServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DiscussServiceTransport() + adc.assert_called_once() + + +def test_discuss_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DiscussServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceGrpcAsyncIOTransport, + ], +) +def test_discuss_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceGrpcAsyncIOTransport, + transports.DiscussServiceRestTransport, + ], +) +def test_discuss_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.DiscussServiceGrpcTransport, grpc_helpers), + (transports.DiscussServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_discuss_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceGrpcAsyncIOTransport, + ], +) +def test_discuss_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_discuss_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.DiscussServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_discuss_service_host_no_port(transport_name): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_discuss_service_host_with_port(transport_name): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_discuss_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = DiscussServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = DiscussServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.generate_message._session + session2 = client2.transport.generate_message._session + assert session1 != session2 + session1 = client1.transport.count_message_tokens._session + session2 = client2.transport.count_message_tokens._session + assert session1 != session2 + + +def test_discuss_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.DiscussServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_discuss_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.DiscussServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceGrpcAsyncIOTransport, + ], +) +def test_discuss_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.DiscussServiceGrpcTransport, + transports.DiscussServiceGrpcAsyncIOTransport, + ], +) +def test_discuss_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_path(): + model = "squid" + expected = "models/{model}".format( + model=model, + ) + actual = DiscussServiceClient.model_path(model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "model": "clam", + } + path = DiscussServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = DiscussServiceClient.parse_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = DiscussServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = DiscussServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DiscussServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = DiscussServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = DiscussServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DiscussServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = DiscussServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = DiscussServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DiscussServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = DiscussServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = DiscussServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = DiscussServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = DiscussServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = DiscussServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = DiscussServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.DiscussServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.DiscussServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = DiscussServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = DiscussServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = DiscussServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (DiscussServiceClient, transports.DiscussServiceGrpcTransport), + (DiscussServiceAsyncClient, transports.DiscussServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_generative_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_generative_service.py new file mode 100644 index 000000000000..ebf8f86f1b02 --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_generative_service.py @@ -0,0 +1,4722 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import json_format +from google.protobuf import struct_pb2 # type: ignore +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1beta.services.generative_service import ( + GenerativeServiceAsyncClient, + GenerativeServiceClient, + transports, +) +from google.ai.generativelanguage_v1beta.types import ( + generative_service, + retriever, + safety, +) +from google.ai.generativelanguage_v1beta.types import content +from google.ai.generativelanguage_v1beta.types import content as gag_content + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GenerativeServiceClient._get_default_mtls_endpoint(None) is None + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GenerativeServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (GenerativeServiceClient, "grpc"), + (GenerativeServiceAsyncClient, "grpc_asyncio"), + (GenerativeServiceClient, "rest"), + ], +) +def test_generative_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 
( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.GenerativeServiceGrpcTransport, "grpc"), + (transports.GenerativeServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.GenerativeServiceRestTransport, "rest"), + ], +) +def test_generative_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (GenerativeServiceClient, "grpc"), + (GenerativeServiceAsyncClient, "grpc_asyncio"), + (GenerativeServiceClient, "rest"), + ], +) +def test_generative_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_generative_service_client_get_transport_class(): + transport = GenerativeServiceClient.get_transport_class() + available_transports = [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceRestTransport, + ] + assert transport in available_transports + + transport = GenerativeServiceClient.get_transport_class("grpc") + assert transport == transports.GenerativeServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenerativeServiceClient, transports.GenerativeServiceGrpcTransport, "grpc"), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (GenerativeServiceClient, transports.GenerativeServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + GenerativeServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceClient), +) +@mock.patch.object( + GenerativeServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceAsyncClient), +) +def test_generative_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(GenerativeServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GenerativeServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
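+    # Only "true" and "false" are accepted for GOOGLE_API_USE_CLIENT_CERTIFICATE; any
+    # other value is expected to surface as a ValueError from the client constructor.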
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + "true", + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + "false", + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceRestTransport, + "rest", + "true", + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + GenerativeServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceClient), +) +@mock.patch.object( + GenerativeServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_generative_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
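+    # Rough sketch of the selection logic exercised below (documented here, not
+    # asserted directly):
+    #
+    #   if GOOGLE_API_USE_CLIENT_CERTIFICATE == "true" and a client cert is available:
+    #       expected host = client.DEFAULT_MTLS_ENDPOINT, cert source is forwarded
+    #   else:
+    #       expected host = client.DEFAULT_ENDPOINT, no client cert source
+    #
+    # Each case patches transport_class.__init__ and inspects the host and
+    # client_cert_source_for_mtls arguments it receives.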
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [GenerativeServiceClient, GenerativeServiceAsyncClient] +) +@mock.patch.object( + GenerativeServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceClient), +) +@mock.patch.object( + GenerativeServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenerativeServiceAsyncClient), +) +def test_generative_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenerativeServiceClient, transports.GenerativeServiceGrpcTransport, "grpc"), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (GenerativeServiceClient, transports.GenerativeServiceRestTransport, "rest"), + ], +) +def test_generative_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + GenerativeServiceClient, + transports.GenerativeServiceRestTransport, + "rest", + None, + ), + ], +) +def test_generative_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_generative_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1beta.services.generative_service.transports.GenerativeServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = GenerativeServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + GenerativeServiceClient, + transports.GenerativeServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_generative_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
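+    # google.auth.load_credentials_from_file is patched below, so no real
+    # "credentials.json" is read; the test only verifies that the credentials it
+    # returns are the ones handed to grpc_helpers.create_channel.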
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_generate_content(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateContentResponse() + response = client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +def test_generate_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + client.generate_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + +@pytest.mark.asyncio +async def test_generate_content_async( + transport: str = "grpc_asyncio", + request_type=generative_service.GenerateContentRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateContentResponse() + ) + response = await client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. 
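+        # Unlike the sync test above, which checks for exactly one recorded call,
+        # the async variant only asserts that at least one call was recorded.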
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +@pytest.mark.asyncio +async def test_generate_content_async_from_dict(): + await test_generate_content_async(request_type=dict) + + +def test_generate_content_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + call.return_value = generative_service.GenerateContentResponse() + client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_content_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateContentResponse() + ) + await client.generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_generate_content_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateContentResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +def test_generate_content_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.asyncio +async def test_generate_content_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateContentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateContentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_generate_content_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateAnswerRequest, + dict, + ], +) +def test_generate_answer(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateAnswerResponse( + answerable_probability=0.234, + ) + response = client.generate_answer(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateAnswerRequest() + + # Establish that the response is the type that we expect. 
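+    # Float proto fields are compared with math.isclose rather than == to avoid
+    # spurious failures from floating-point rounding.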
+ assert isinstance(response, generative_service.GenerateAnswerResponse) + assert math.isclose(response.answerable_probability, 0.234, rel_tol=1e-6) + + +def test_generate_answer_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + client.generate_answer() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateAnswerRequest() + + +@pytest.mark.asyncio +async def test_generate_answer_async( + transport: str = "grpc_asyncio", + request_type=generative_service.GenerateAnswerRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateAnswerResponse( + answerable_probability=0.234, + ) + ) + response = await client.generate_answer(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateAnswerRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateAnswerResponse) + assert math.isclose(response.answerable_probability, 0.234, rel_tol=1e-6) + + +@pytest.mark.asyncio +async def test_generate_answer_async_from_dict(): + await test_generate_answer_async(request_type=dict) + + +def test_generate_answer_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateAnswerRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + call.return_value = generative_service.GenerateAnswerResponse() + client.generate_answer(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_answer_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateAnswerRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateAnswerResponse() + ) + await client.generate_answer(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_generate_answer_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateAnswerResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_answer( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + safety_settings=[ + safety.SafetySetting( + category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY + ) + ], + answer_style=generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + arg = args[0].safety_settings + mock_val = [ + safety.SafetySetting(category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY) + ] + assert arg == mock_val + arg = args[0].answer_style + mock_val = generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE + assert arg == mock_val + + +def test_generate_answer_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_answer( + generative_service.GenerateAnswerRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + safety_settings=[ + safety.SafetySetting( + category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY + ) + ], + answer_style=generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, + ) + + +@pytest.mark.asyncio +async def test_generate_answer_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_answer), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.GenerateAnswerResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.GenerateAnswerResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.generate_answer( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + safety_settings=[ + safety.SafetySetting( + category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY + ) + ], + answer_style=generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + arg = args[0].safety_settings + mock_val = [ + safety.SafetySetting(category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY) + ] + assert arg == mock_val + arg = args[0].answer_style + mock_val = generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_generate_answer_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.generate_answer( + generative_service.GenerateAnswerRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + safety_settings=[ + safety.SafetySetting( + category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY + ) + ], + answer_style=generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_stream_generate_content(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([generative_service.GenerateContentResponse()]) + response = client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, generative_service.GenerateContentResponse) + + +def test_stream_generate_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + client.stream_generate_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + +@pytest.mark.asyncio +async def test_stream_generate_content_async( + transport: str = "grpc_asyncio", + request_type=generative_service.GenerateContentRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[generative_service.GenerateContentResponse()] + ) + response = await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, generative_service.GenerateContentResponse) + + +@pytest.mark.asyncio +async def test_stream_generate_content_async_from_dict(): + await test_stream_generate_content_async(request_type=dict) + + +def test_stream_generate_content_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = iter([generative_service.GenerateContentResponse()]) + client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_stream_generate_content_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[generative_service.GenerateContentResponse()] + ) + await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_stream_generate_content_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([generative_service.GenerateContentResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.stream_generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +def test_stream_generate_content_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.stream_generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([generative_service.GenerateContentResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.stream_generate_content( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
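+    # The request object and the flattened keyword arguments are mutually exclusive,
+    # so supplying both is expected to raise ValueError.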
+ with pytest.raises(ValueError): + await client.stream_generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.EmbedContentRequest, + dict, + ], +) +def test_embed_content(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.EmbedContentResponse() + response = client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.EmbedContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.EmbedContentResponse) + + +def test_embed_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + client.embed_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.EmbedContentRequest() + + +@pytest.mark.asyncio +async def test_embed_content_async( + transport: str = "grpc_asyncio", request_type=generative_service.EmbedContentRequest +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.EmbedContentResponse() + ) + response = await client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.EmbedContentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.EmbedContentResponse) + + +@pytest.mark.asyncio +async def test_embed_content_async_from_dict(): + await test_embed_content_async(request_type=dict) + + +def test_embed_content_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
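+    # Concretely, setting request.model should result in an
+    # ("x-goog-request-params", "model=model_value") entry in the outgoing metadata,
+    # which is what the assertion at the end of this test checks.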
+ request = generative_service.EmbedContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + call.return_value = generative_service.EmbedContentResponse() + client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_embed_content_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.EmbedContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.EmbedContentResponse() + ) + await client.embed_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_embed_content_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.EmbedContentResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.embed_content( + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].content + mock_val = gag_content.Content(parts=[gag_content.Part(text="text_value")]) + assert arg == mock_val + + +def test_embed_content_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.embed_content( + generative_service.EmbedContentRequest(), + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + +@pytest.mark.asyncio +async def test_embed_content_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_content), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = generative_service.EmbedContentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.EmbedContentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.embed_content( + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].content + mock_val = gag_content.Content(parts=[gag_content.Part(text="text_value")]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_embed_content_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.embed_content( + generative_service.EmbedContentRequest(), + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.BatchEmbedContentsRequest, + dict, + ], +) +def test_batch_embed_contents(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.BatchEmbedContentsResponse() + response = client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.BatchEmbedContentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.BatchEmbedContentsResponse) + + +def test_batch_embed_contents_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + client.batch_embed_contents() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.BatchEmbedContentsRequest() + + +@pytest.mark.asyncio +async def test_batch_embed_contents_async( + transport: str = "grpc_asyncio", + request_type=generative_service.BatchEmbedContentsRequest, +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.BatchEmbedContentsResponse() + ) + response = await client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.BatchEmbedContentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.BatchEmbedContentsResponse) + + +@pytest.mark.asyncio +async def test_batch_embed_contents_async_from_dict(): + await test_batch_embed_contents_async(request_type=dict) + + +def test_batch_embed_contents_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.BatchEmbedContentsRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + call.return_value = generative_service.BatchEmbedContentsResponse() + client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_embed_contents_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.BatchEmbedContentsRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.BatchEmbedContentsResponse() + ) + await client.batch_embed_contents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_batch_embed_contents_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.BatchEmbedContentsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.batch_embed_contents( + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].requests + mock_val = [generative_service.EmbedContentRequest(model="model_value")] + assert arg == mock_val + + +def test_batch_embed_contents_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_embed_contents( + generative_service.BatchEmbedContentsRequest(), + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + +@pytest.mark.asyncio +async def test_batch_embed_contents_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_embed_contents), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.BatchEmbedContentsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.BatchEmbedContentsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_embed_contents( + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].requests + mock_val = [generative_service.EmbedContentRequest(model="model_value")] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_batch_embed_contents_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_embed_contents( + generative_service.BatchEmbedContentsRequest(), + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.CountTokensRequest, + dict, + ], +) +def test_count_tokens(request_type, transport: str = "grpc"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.CountTokensResponse( + total_tokens=1303, + ) + response = client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.CountTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.CountTokensResponse) + assert response.total_tokens == 1303 + + +def test_count_tokens_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + client.count_tokens() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.CountTokensRequest() + + +@pytest.mark.asyncio +async def test_count_tokens_async( + transport: str = "grpc_asyncio", request_type=generative_service.CountTokensRequest +): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.CountTokensResponse( + total_tokens=1303, + ) + ) + response = await client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == generative_service.CountTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.CountTokensResponse) + assert response.total_tokens == 1303 + + +@pytest.mark.asyncio +async def test_count_tokens_async_from_dict(): + await test_count_tokens_async(request_type=dict) + + +def test_count_tokens_field_headers(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = generative_service.CountTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + call.return_value = generative_service.CountTokensResponse() + client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_count_tokens_field_headers_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
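+    # The client should copy ``model`` into the ``x-goog-request-params``
+    # metadata entry, which is asserted at the end of this test.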
+ request = generative_service.CountTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.CountTokensResponse() + ) + await client.count_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_count_tokens_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.CountTokensResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.count_tokens( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +def test_count_tokens_flattened_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_tokens( + generative_service.CountTokensRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.asyncio +async def test_count_tokens_flattened_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.count_tokens), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = generative_service.CountTokensResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + generative_service.CountTokensResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.count_tokens( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(parts=[content.Part(text="text_value")])] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_count_tokens_flattened_error_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.count_tokens( + generative_service.CountTokensRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_generate_content_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_content(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +def test_generate_content_rest_required_fields( + request_type=generative_service.GenerateContentRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + # Mock the http request call within the method and fake a response. 
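+    # Patching Session.request keeps the test offline; the fake Response built
+    # below stands in for the server reply.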
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.generate_content(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_content_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_content._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_content_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_generate_content" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_generate_content" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.GenerateContentRequest.pb( + generative_service.GenerateContentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.GenerateContentResponse.to_json( + generative_service.GenerateContentResponse() + ) + + request = generative_service.GenerateContentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.GenerateContentResponse() + + client.generate_content( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_content_rest_bad_request( + transport: str = "rest", request_type=generative_service.GenerateContentRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": 
"models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_content(request) + + +def test_generate_content_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.generate_content(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:generateContent" % client.transport._host, + args[1], + ) + + +def test_generate_content_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +def test_generate_content_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateAnswerRequest, + dict, + ], +) +def test_generate_answer_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.GenerateAnswerResponse( + answerable_probability=0.234, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateAnswerResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_answer(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateAnswerResponse) + assert math.isclose(response.answerable_probability, 0.234, rel_tol=1e-6) + + +def test_generate_answer_rest_required_fields( + request_type=generative_service.GenerateAnswerRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_answer._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_answer._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateAnswerResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
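+            # The only query parameter expected on the wire is the "$alt"
+            # marker ("json;enum-encoding=int") asserted at the end of this test.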
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.GenerateAnswerResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.generate_answer(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_answer_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_answer._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + "answerStyle", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_answer_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_generate_answer" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_generate_answer" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.GenerateAnswerRequest.pb( + generative_service.GenerateAnswerRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.GenerateAnswerResponse.to_json( + generative_service.GenerateAnswerResponse() + ) + + request = generative_service.GenerateAnswerRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.GenerateAnswerResponse() + + client.generate_answer( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_answer_rest_bad_request( + transport: str = "rest", request_type=generative_service.GenerateAnswerRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
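+    # A mocked 400 response should surface as core_exceptions.BadRequest.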
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_answer(request) + + +def test_generate_answer_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateAnswerResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + safety_settings=[ + safety.SafetySetting( + category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY + ) + ], + answer_style=generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateAnswerResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.generate_answer(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:generateAnswer" % client.transport._host, + args[1], + ) + + +def test_generate_answer_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_answer( + generative_service.GenerateAnswerRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + safety_settings=[ + safety.SafetySetting( + category=safety.HarmCategory.HARM_CATEGORY_DEROGATORY + ) + ], + answer_style=generative_service.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, + ) + + +def test_generate_answer_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.GenerateContentRequest, + dict, + ], +) +def test_stream_generate_content_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
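+        # Server-streaming REST responses arrive as a JSON array, hence the
+        # "[{}]" wrapping and the iter_content mock used below.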
+ return_value = generative_service.GenerateContentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.stream_generate_content(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.GenerateContentResponse) + + +def test_stream_generate_content_rest_required_fields( + request_type=generative_service.GenerateContentRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).stream_generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).stream_generate_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.stream_generate_content(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_stream_generate_content_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.stream_generate_content._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_stream_generate_content_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_stream_generate_content" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_stream_generate_content" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.GenerateContentRequest.pb( + generative_service.GenerateContentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.GenerateContentResponse.to_json( + generative_service.GenerateContentResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = generative_service.GenerateContentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.GenerateContentResponse() + + client.stream_generate_content( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_stream_generate_content_rest_bad_request( + transport: str = "rest", request_type=generative_service.GenerateContentRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake 
a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.stream_generate_content(request) + + +def test_stream_generate_content_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.GenerateContentResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.GenerateContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.stream_generate_content(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:streamGenerateContent" % client.transport._host, + args[1], + ) + + +def test_stream_generate_content_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.stream_generate_content( + generative_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +def test_stream_generate_content_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.EmbedContentRequest, + dict, + ], +) +def test_embed_content_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
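+        # The protobuf response is serialized via json_format.MessageToJson and
+        # set as the mocked HTTP body, mirroring what the REST transport parses.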
+ return_value = generative_service.EmbedContentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.EmbedContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.embed_content(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.EmbedContentResponse) + + +def test_embed_content_rest_required_fields( + request_type=generative_service.EmbedContentRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).embed_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).embed_content._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.EmbedContentResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.EmbedContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.embed_content(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_embed_content_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.embed_content._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "content", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_embed_content_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_embed_content" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_embed_content" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.EmbedContentRequest.pb( + generative_service.EmbedContentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.EmbedContentResponse.to_json( + generative_service.EmbedContentResponse() + ) + + request = generative_service.EmbedContentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.EmbedContentResponse() + + client.embed_content( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_embed_content_rest_bad_request( + transport: str = "rest", request_type=generative_service.EmbedContentRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.embed_content(request) + + +def test_embed_content_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.EmbedContentResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.EmbedContentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.embed_content(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:embedContent" % client.transport._host, args[1] + ) + + +def test_embed_content_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.embed_content( + generative_service.EmbedContentRequest(), + model="model_value", + content=gag_content.Content(parts=[gag_content.Part(text="text_value")]), + ) + + +def test_embed_content_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.BatchEmbedContentsRequest, + dict, + ], +) +def test_batch_embed_contents_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.BatchEmbedContentsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.BatchEmbedContentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_embed_contents(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.BatchEmbedContentsResponse) + + +def test_batch_embed_contents_rest_required_fields( + request_type=generative_service.BatchEmbedContentsRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_embed_contents._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_embed_contents._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.BatchEmbedContentsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.BatchEmbedContentsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.batch_embed_contents(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_embed_contents_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_embed_contents._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "requests", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_embed_contents_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_batch_embed_contents" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_batch_embed_contents" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.BatchEmbedContentsRequest.pb( + generative_service.BatchEmbedContentsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + generative_service.BatchEmbedContentsResponse.to_json( + generative_service.BatchEmbedContentsResponse() + ) + ) + + request = generative_service.BatchEmbedContentsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.BatchEmbedContentsResponse() + + client.batch_embed_contents( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_embed_contents_rest_bad_request( + transport: str = "rest", request_type=generative_service.BatchEmbedContentsRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_embed_contents(request) + + +def test_batch_embed_contents_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.BatchEmbedContentsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.BatchEmbedContentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.batch_embed_contents(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:batchEmbedContents" % client.transport._host, + args[1], + ) + + +def test_batch_embed_contents_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_embed_contents( + generative_service.BatchEmbedContentsRequest(), + model="model_value", + requests=[generative_service.EmbedContentRequest(model="model_value")], + ) + + +def test_batch_embed_contents_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + generative_service.CountTokensRequest, + dict, + ], +) +def test_count_tokens_rest(request_type): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = generative_service.CountTokensResponse( + total_tokens=1303, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.CountTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.count_tokens(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, generative_service.CountTokensResponse) + assert response.total_tokens == 1303 + + +def test_count_tokens_rest_required_fields( + request_type=generative_service.CountTokensRequest, +): + transport_class = transports.GenerativeServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = generative_service.CountTokensResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = generative_service.CountTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.count_tokens(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_count_tokens_rest_unset_required_fields(): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.count_tokens._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_count_tokens_rest_interceptors(null_interceptor): + transport = transports.GenerativeServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenerativeServiceRestInterceptor(), + ) + client = GenerativeServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "post_count_tokens" + ) as post, mock.patch.object( + transports.GenerativeServiceRestInterceptor, "pre_count_tokens" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = generative_service.CountTokensRequest.pb( + generative_service.CountTokensRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = generative_service.CountTokensResponse.to_json( + generative_service.CountTokensResponse() + ) + + request = generative_service.CountTokensRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = generative_service.CountTokensResponse() + + client.count_tokens( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_count_tokens_rest_bad_request( + transport: str = "rest", request_type=generative_service.CountTokensRequest +): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.count_tokens(request) + + +def test_count_tokens_rest_flattened(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = generative_service.CountTokensResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = generative_service.CountTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.count_tokens(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:countTokens" % client.transport._host, args[1] + ) + + +def test_count_tokens_rest_flattened_error(transport: str = "rest"): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_tokens( + generative_service.CountTokensRequest(), + model="model_value", + contents=[content.Content(parts=[content.Part(text="text_value")])], + ) + + +def test_count_tokens_rest_error(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
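+    # Supplying an api_key via client_options together with an explicit transport must also raise ValueError.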
+ transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenerativeServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GenerativeServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.GenerativeServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.GenerativeServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + transports.GenerativeServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = GenerativeServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.GenerativeServiceGrpcTransport, + ) + + +def test_generative_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GenerativeServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_generative_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1beta.services.generative_service.transports.GenerativeServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.GenerativeServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
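+        # The abstract base transport only declares the RPC surface; concrete gRPC/REST transports supply the implementations.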
+ methods = ( + "generate_content", + "generate_answer", + "stream_generate_content", + "embed_content", + "batch_embed_contents", + "count_tokens", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_generative_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1beta.services.generative_service.transports.GenerativeServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GenerativeServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_generative_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1beta.services.generative_service.transports.GenerativeServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GenerativeServiceTransport() + adc.assert_called_once() + + +def test_generative_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GenerativeServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
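+    # google.auth.default is patched so no real ADC lookup happens; scopes and quota project must be passed through.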
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + transports.GenerativeServiceRestTransport, + ], +) +def test_generative_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.GenerativeServiceGrpcTransport, grpc_helpers), + (transports.GenerativeServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_generative_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
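+    # The callback's certificate/key pair should be handed to grpc.ssl_channel_credentials.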
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_generative_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.GenerativeServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_generative_service_host_no_port(transport_name): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_generative_service_host_with_port(transport_name): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_generative_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = GenerativeServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = GenerativeServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.generate_content._session + session2 = client2.transport.generate_content._session + assert session1 != session2 + session1 = client1.transport.generate_answer._session + session2 = client2.transport.generate_answer._session + assert session1 != session2 + session1 = client1.transport.stream_generate_content._session + session2 = client2.transport.stream_generate_content._session + assert session1 != session2 + session1 = client1.transport.embed_content._session + session2 = client2.transport.embed_content._session + assert session1 != session2 + session1 = client1.transport.batch_embed_contents._session + session2 = client2.transport.batch_embed_contents._session + assert session1 != session2 + session1 = client1.transport.count_tokens._session + session2 = client2.transport.count_tokens._session + assert session1 != session2 + + +def test_generative_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
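+    # A caller-supplied channel is used as-is and no SSL channel credentials are created.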
+ transport = transports.GenerativeServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_generative_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.GenerativeServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
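+# This variant obtains SSL credentials from ADC (SslCredentials) instead of an explicit cert source.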
+@pytest.mark.parametrize( + "transport_class", + [ + transports.GenerativeServiceGrpcTransport, + transports.GenerativeServiceGrpcAsyncIOTransport, + ], +) +def test_generative_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_path(): + model = "squid" + expected = "models/{model}".format( + model=model, + ) + actual = GenerativeServiceClient.model_path(model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "model": "clam", + } + path = GenerativeServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = GenerativeServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = GenerativeServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = GenerativeServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = GenerativeServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = GenerativeServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = GenerativeServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GenerativeServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = GenerativeServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = GenerativeServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = GenerativeServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = GenerativeServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = GenerativeServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.GenerativeServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.GenerativeServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = GenerativeServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = GenerativeServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = GenerativeServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
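+        # Exiting the client context manager should close the underlying transport.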
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (GenerativeServiceClient, transports.GenerativeServiceGrpcTransport), + ( + GenerativeServiceAsyncClient, + transports.GenerativeServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_model_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_model_service.py new file mode 100644 index 000000000000..81074d597f5c --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_model_service.py @@ -0,0 +1,5648 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import ( + future, + gapic_v1, + grpc_helpers, + grpc_helpers_async, + operation, + operations_v1, + path_template, +) +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import operation_async # type: ignore +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import json_format +from google.protobuf import timestamp_pb2 # type: ignore +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1beta.services.model_service import ( + ModelServiceAsyncClient, + ModelServiceClient, + pagers, + transports, +) +from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model +from google.ai.generativelanguage_v1beta.types import model, model_service +from google.ai.generativelanguage_v1beta.types import tuned_model + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelServiceClient._get_default_mtls_endpoint(None) is None + assert ( + ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + ) + assert ( + ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), + (ModelServiceClient, "rest"), + ], +) +def test_model_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ModelServiceGrpcTransport, "grpc"), + (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.ModelServiceRestTransport, "rest"), + ], +) +def test_model_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), + (ModelServiceClient, "rest"), + ], +) +def test_model_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert 
client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_model_service_client_get_transport_class(): + transport = ModelServiceClient.get_transport_class() + available_transports = [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceRestTransport, + ] + assert transport in available_transports + + transport = ModelServiceClient.get_transport_class("grpc") + assert transport == transports.ModelServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient) +) +@mock.patch.object( + ModelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelServiceAsyncClient), +) +def test_model_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
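+    # With "always", the client must target DEFAULT_MTLS_ENDPOINT even when no client certificate is configured.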
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest", "true"), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest", "false"), + ], +) +@mock.patch.object( + ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient) +) +@mock.patch.object( + ModelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. 
+ + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ModelServiceClient, ModelServiceAsyncClient]) +@mock.patch.object( + ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient) +) +@mock.patch.object( + ModelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelServiceAsyncClient), +) +def test_model_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
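+    # When both api_endpoint and client_cert_source are given in options, they should be returned unchanged.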
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest"), + ], +) +def test_model_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
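+    # User-supplied scopes should be forwarded verbatim to the transport constructor.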
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ModelServiceClient, + transports.ModelServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (ModelServiceClient, transports.ModelServiceRestTransport, "rest", None), + ], +) +def test_model_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_model_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1beta.services.model_service.transports.ModelServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ModelServiceClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ModelServiceClient, + transports.ModelServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ModelServiceAsyncClient, + transports.ModelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_model_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.GetModelRequest, + dict, + ], +) +def test_get_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name="name_value", + base_model_id="base_model_id_value", + version="version_value", + display_name="display_name_value", + description="description_value", + input_token_limit=1838, + output_token_limit=1967, + supported_generation_methods=["supported_generation_methods_value"], + temperature=0.1198, + top_p=0.546, + top_k=541, + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.base_model_id == "base_model_id_value" + assert response.version == "version_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.input_token_limit == 1838 + assert response.output_token_limit == 1967 + assert response.supported_generation_methods == [ + "supported_generation_methods_value" + ] + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + + +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
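+    # An argument-less get_model() call should still send a default GetModelRequest.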
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + +@pytest.mark.asyncio +async def test_get_model_async( + transport: str = "grpc_asyncio", request_type=model_service.GetModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model.Model( + name="name_value", + base_model_id="base_model_id_value", + version="version_value", + display_name="display_name_value", + description="description_value", + input_token_limit=1838, + output_token_limit=1967, + supported_generation_methods=["supported_generation_methods_value"], + temperature=0.1198, + top_p=0.546, + top_k=541, + ) + ) + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.base_model_id == "base_model_id_value" + assert response.version == "version_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.input_token_limit == 1838 + assert response.output_token_limit == 1967 + assert response.supported_generation_methods == [ + "supported_generation_methods_value" + ] + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
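+    # The request's name field must be propagated into the x-goog-request-params routing header.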
+ request = model_service.GetModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model( + model_service.GetModelRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.ListModelsRequest, + dict, + ], +) +def test_list_models(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + +@pytest.mark.asyncio +async def test_list_models_async( + transport: str = "grpc_asyncio", request_type=model_service.ListModelsRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_models_async_from_dict(): + await test_list_models_async(request_type=dict) + + +def test_list_models_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_models( + page_size=951, + page_token="page_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].page_size + mock_val = 951 + assert arg == mock_val + arg = args[0].page_token + mock_val = "page_token_value" + assert arg == mock_val + + +def test_list_models_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + model_service.ListModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_models_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListModelsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_models( + page_size=951, + page_token="page_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].page_size + mock_val = 951 + assert arg == mock_val + arg = args[0].page_token + mock_val = "page_token_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_models_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_models( + model_service.ListModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +def test_list_models_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) for i in results) + + +def test_list_models_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + # Set the response to a series of pages. 
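+        # Each response below represents one page; the trailing RuntimeError is
+        # a sentinel that would surface if the pager requested more pages than
+        # were mocked.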
+ call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_models(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_models_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_models( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) for i in responses) + + +@pytest.mark.asyncio +async def test_list_models_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_models(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.GetTunedModelRequest, + dict, + ], +) +def test_get_tuned_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tuned_model.TunedModel( + name="name_value", + display_name="display_name_value", + description="description_value", + temperature=0.1198, + top_p=0.546, + top_k=541, + state=tuned_model.TunedModel.State.CREATING, + base_model="base_model_value", + ) + response = client.get_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetTunedModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tuned_model.TunedModel) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + assert response.state == tuned_model.TunedModel.State.CREATING + + +def test_get_tuned_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + client.get_tuned_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetTunedModelRequest() + + +@pytest.mark.asyncio +async def test_get_tuned_model_async( + transport: str = "grpc_asyncio", request_type=model_service.GetTunedModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuned_model.TunedModel( + name="name_value", + display_name="display_name_value", + description="description_value", + temperature=0.1198, + top_p=0.546, + top_k=541, + state=tuned_model.TunedModel.State.CREATING, + ) + ) + response = await client.get_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetTunedModelRequest() + + # Establish that the response is the type that we expect. 
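+    # Awaiting the client call unwraps the FakeUnaryUnaryCall, so ``response``
+    # is the TunedModel itself rather than the wrapper.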
+ assert isinstance(response, tuned_model.TunedModel) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + assert response.state == tuned_model.TunedModel.State.CREATING + + +@pytest.mark.asyncio +async def test_get_tuned_model_async_from_dict(): + await test_get_tuned_model_async(request_type=dict) + + +def test_get_tuned_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetTunedModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + call.return_value = tuned_model.TunedModel() + client.get_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_tuned_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetTunedModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuned_model.TunedModel() + ) + await client.get_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_tuned_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tuned_model.TunedModel() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tuned_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_tuned_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
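+    # The ValueError is raised client-side, before any request is sent.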
+ with pytest.raises(ValueError): + client.get_tuned_model( + model_service.GetTunedModelRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tuned_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuned_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tuned_model.TunedModel() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuned_model.TunedModel() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tuned_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_tuned_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tuned_model( + model_service.GetTunedModelRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.ListTunedModelsRequest, + dict, + ], +) +def test_list_tuned_models(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListTunedModelsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_tuned_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListTunedModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTunedModelsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_tuned_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + client.list_tuned_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListTunedModelsRequest() + + +@pytest.mark.asyncio +async def test_list_tuned_models_async( + transport: str = "grpc_asyncio", request_type=model_service.ListTunedModelsRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListTunedModelsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_tuned_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListTunedModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTunedModelsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tuned_models_async_from_dict(): + await test_list_tuned_models_async(request_type=dict) + + +def test_list_tuned_models_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListTunedModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tuned_models( + page_size=951, + page_token="page_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].page_size + mock_val = 951 + assert arg == mock_val + arg = args[0].page_token + mock_val = "page_token_value" + assert arg == mock_val + + +def test_list_tuned_models_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tuned_models( + model_service.ListTunedModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_tuned_models_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_service.ListTunedModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_service.ListTunedModelsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tuned_models( + page_size=951, + page_token="page_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].page_size + mock_val = 951 + assert arg == mock_val + arg = args[0].page_token + mock_val = "page_token_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_tuned_models_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tuned_models( + model_service.ListTunedModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +def test_list_tuned_models_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + next_page_token="abc", + ), + model_service.ListTunedModelsResponse( + tuned_models=[], + next_page_token="def", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + ], + next_page_token="ghi", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_tuned_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tuned_model.TunedModel) for i in results) + + +def test_list_tuned_models_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + next_page_token="abc", + ), + model_service.ListTunedModelsResponse( + tuned_models=[], + next_page_token="def", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + ], + next_page_token="ghi", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tuned_models(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_tuned_models_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + next_page_token="abc", + ), + model_service.ListTunedModelsResponse( + tuned_models=[], + next_page_token="def", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + ], + next_page_token="ghi", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tuned_models( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tuned_model.TunedModel) for i in responses) + + +@pytest.mark.asyncio +async def test_list_tuned_models_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuned_models), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + next_page_token="abc", + ), + model_service.ListTunedModelsResponse( + tuned_models=[], + next_page_token="def", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + ], + next_page_token="ghi", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_tuned_models(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.CreateTunedModelRequest, + dict, + ], +) +def test_create_tuned_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.CreateTunedModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_tuned_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuned_model), "__call__" + ) as call: + client.create_tuned_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.CreateTunedModelRequest() + + +@pytest.mark.asyncio +async def test_create_tuned_model_async( + transport: str = "grpc_asyncio", request_type=model_service.CreateTunedModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
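+        # CreateTunedModel is a long-running operation, so the stub returns an
+        # Operation and the client is expected to wrap it in a future
+        # (asserted below).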
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.CreateTunedModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_tuned_model_async_from_dict(): + await test_create_tuned_model_async(request_type=dict) + + +def test_create_tuned_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tuned_model( + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + tuned_model_id="tuned_model_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tuned_model + mock_val = gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ) + assert arg == mock_val + arg = args[0].tuned_model_id + mock_val = "tuned_model_id_value" + assert arg == mock_val + + +def test_create_tuned_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tuned_model( + model_service.CreateTunedModelRequest(), + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + tuned_model_id="tuned_model_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_tuned_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tuned_model( + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + tuned_model_id="tuned_model_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tuned_model + mock_val = gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ) + assert arg == mock_val + arg = args[0].tuned_model_id + mock_val = "tuned_model_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_tuned_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tuned_model( + model_service.CreateTunedModelRequest(), + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + tuned_model_id="tuned_model_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.UpdateTunedModelRequest, + dict, + ], +) +def test_update_tuned_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_tuned_model.TunedModel( + name="name_value", + display_name="display_name_value", + description="description_value", + temperature=0.1198, + top_p=0.546, + top_k=541, + state=gag_tuned_model.TunedModel.State.CREATING, + base_model="base_model_value", + ) + response = client.update_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateTunedModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gag_tuned_model.TunedModel) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + assert response.state == gag_tuned_model.TunedModel.State.CREATING + + +def test_update_tuned_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
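+    # Even with no arguments, the client should construct a default
+    # UpdateTunedModelRequest before invoking the stub.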
+ with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + client.update_tuned_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateTunedModelRequest() + + +@pytest.mark.asyncio +async def test_update_tuned_model_async( + transport: str = "grpc_asyncio", request_type=model_service.UpdateTunedModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_tuned_model.TunedModel( + name="name_value", + display_name="display_name_value", + description="description_value", + temperature=0.1198, + top_p=0.546, + top_k=541, + state=gag_tuned_model.TunedModel.State.CREATING, + ) + ) + response = await client.update_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateTunedModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gag_tuned_model.TunedModel) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + assert response.state == gag_tuned_model.TunedModel.State.CREATING + + +@pytest.mark.asyncio +async def test_update_tuned_model_async_from_dict(): + await test_update_tuned_model_async(request_type=dict) + + +def test_update_tuned_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateTunedModelRequest() + + request.tuned_model.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + call.return_value = gag_tuned_model.TunedModel() + client.update_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tuned_model.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_tuned_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateTunedModelRequest() + + request.tuned_model.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
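+    # UpdateTunedModel routes on the nested ``tuned_model.name`` field, so the
+    # expected header value is "tuned_model.name=name_value".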
+ with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_tuned_model.TunedModel() + ) + await client.update_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tuned_model.name=name_value", + ) in kw["metadata"] + + +def test_update_tuned_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_tuned_model.TunedModel() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tuned_model( + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tuned_model + mock_val = gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_tuned_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tuned_model( + model_service.UpdateTunedModelRequest(), + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_tuned_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_tuned_model.TunedModel() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_tuned_model.TunedModel() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tuned_model( + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tuned_model + mock_val = gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_tuned_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tuned_model( + model_service.UpdateTunedModelRequest(), + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.DeleteTunedModelRequest, + dict, + ], +) +def test_delete_tuned_model(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteTunedModelRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_tuned_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + client.delete_tuned_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteTunedModelRequest() + + +@pytest.mark.asyncio +async def test_delete_tuned_model_async( + transport: str = "grpc_asyncio", request_type=model_service.DeleteTunedModelRequest +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. 
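+        # The mocked call resolves to ``None``, and the client is expected to
+        # surface it unchanged (asserted after this block).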
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteTunedModelRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_tuned_model_async_from_dict(): + await test_delete_tuned_model_async(request_type=dict) + + +def test_delete_tuned_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteTunedModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + call.return_value = None + client.delete_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_tuned_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteTunedModelRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_tuned_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_tuned_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tuned_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_tuned_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tuned_model( + model_service.DeleteTunedModelRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_tuned_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tuned_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tuned_model( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_tuned_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tuned_model( + model_service.DeleteTunedModelRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.GetModelRequest, + dict, + ], +) +def test_get_model_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model.Model( + name="name_value", + base_model_id="base_model_id_value", + version="version_value", + display_name="display_name_value", + description="description_value", + input_token_limit=1838, + output_token_limit=1967, + supported_generation_methods=["supported_generation_methods_value"], + temperature=0.1198, + top_p=0.546, + top_k=541, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_model(request) + + # Establish that the response is the type that we expect. 
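+    # The REST transport decodes the mocked JSON payload back into a
+    # model.Model, so the field-level assertions mirror the gRPC test above.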
+ assert isinstance(response, model.Model) + assert response.name == "name_value" + assert response.base_model_id == "base_model_id_value" + assert response.version == "version_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.input_token_limit == 1838 + assert response.output_token_limit == 1967 + assert response.supported_generation_methods == [ + "supported_generation_methods_value" + ] + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + + +def test_get_model_rest_required_fields(request_type=model_service.GetModelRequest): + transport_class = transports.ModelServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model.Model() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
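+            # The transcode result below uses the placeholder URI
+            # "v1/sample_method"; the assertions at the end only compare the
+            # query params sent on the session request.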
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_model(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_model_rest_unset_required_fields(): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_get_model" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_get_model" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.GetModelRequest.pb(model_service.GetModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model.Model.to_json(model.Model()) + + request = model_service.GetModelRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model.Model() + + client.get_model( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_rest_bad_request( + transport: str = "rest", request_type=model_service.GetModelRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model(request) + + +def test_get_model_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model.Model() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=models/*}" % client.transport._host, args[1] + ) + + +def test_get_model_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name="name_value", + ) + + +def test_get_model_rest_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.ListModelsRequest, + dict, + ], +) +def test_list_models_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_models(request) + + # Establish that the response is the type that we expect. 
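+    # list_models wraps the raw ListModelsResponse in a ListModelsPager, so the
+    # pager type and the surfaced page token are asserted here.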
+ assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_models_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_list_models" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_list_models" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.ListModelsRequest.pb( + model_service.ListModelsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_service.ListModelsResponse.to_json( + model_service.ListModelsResponse() + ) + + request = model_service.ListModelsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_service.ListModelsResponse() + + client.list_models( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_models_rest_bad_request( + transport: str = "rest", request_type=model_service.ListModelsRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_models(request) + + +def test_list_models_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_service.ListModelsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + page_size=951, + page_token="page_token_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_models(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/models" % client.transport._host, args[1] + ) + + +def test_list_models_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + model_service.ListModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +def test_list_models_rest_pager(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token="abc", + ), + model_service.ListModelsResponse( + models=[], + next_page_token="def", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token="ghi", + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(model_service.ListModelsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list_models(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) for i in results) + + pages = list(client.list_models(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.GetTunedModelRequest, + dict, + ], +) +def test_get_tuned_model_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and 
fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = tuned_model.TunedModel( + name="name_value", + display_name="display_name_value", + description="description_value", + temperature=0.1198, + top_p=0.546, + top_k=541, + state=tuned_model.TunedModel.State.CREATING, + base_model="base_model_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = tuned_model.TunedModel.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_tuned_model(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, tuned_model.TunedModel) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + assert response.state == tuned_model.TunedModel.State.CREATING + + +def test_get_tuned_model_rest_required_fields( + request_type=model_service.GetTunedModelRequest, +): + transport_class = transports.ModelServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_tuned_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_tuned_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = tuned_model.TunedModel() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = tuned_model.TunedModel.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_tuned_model(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_tuned_model_rest_unset_required_fields(): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_tuned_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_tuned_model_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_get_tuned_model" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_get_tuned_model" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.GetTunedModelRequest.pb( + model_service.GetTunedModelRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = tuned_model.TunedModel.to_json( + tuned_model.TunedModel() + ) + + request = model_service.GetTunedModelRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = tuned_model.TunedModel() + + client.get_tuned_model( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_tuned_model_rest_bad_request( + transport: str = "rest", request_type=model_service.GetTunedModelRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
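+    # The REST transport maps an HTTP 400 status from the mocked session to
+    # core_exceptions.BadRequest, which pytest.raises checks for.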
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_tuned_model(request) + + +def test_get_tuned_model_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = tuned_model.TunedModel() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "tunedModels/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = tuned_model.TunedModel.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_tuned_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=tunedModels/*}" % client.transport._host, args[1] + ) + + +def test_get_tuned_model_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tuned_model( + model_service.GetTunedModelRequest(), + name="name_value", + ) + + +def test_get_tuned_model_rest_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.ListTunedModelsRequest, + dict, + ], +) +def test_list_tuned_models_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_service.ListTunedModelsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_service.ListTunedModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_tuned_models(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTunedModelsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_tuned_models_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_list_tuned_models" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_list_tuned_models" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.ListTunedModelsRequest.pb( + model_service.ListTunedModelsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_service.ListTunedModelsResponse.to_json( + model_service.ListTunedModelsResponse() + ) + + request = model_service.ListTunedModelsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_service.ListTunedModelsResponse() + + client.list_tuned_models( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_tuned_models_rest_bad_request( + transport: str = "rest", request_type=model_service.ListTunedModelsRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_tuned_models(request) + + +def test_list_tuned_models_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_service.ListTunedModelsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + page_size=951, + page_token="page_token_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_service.ListTunedModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_tuned_models(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/tunedModels" % client.transport._host, args[1] + ) + + +def test_list_tuned_models_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tuned_models( + model_service.ListTunedModelsRequest(), + page_size=951, + page_token="page_token_value", + ) + + +def test_list_tuned_models_rest_pager(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + next_page_token="abc", + ), + model_service.ListTunedModelsResponse( + tuned_models=[], + next_page_token="def", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + ], + next_page_token="ghi", + ), + model_service.ListTunedModelsResponse( + tuned_models=[ + tuned_model.TunedModel(), + tuned_model.TunedModel(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + model_service.ListTunedModelsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list_tuned_models(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tuned_model.TunedModel) for i in results) + + pages = list(client.list_tuned_models(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.CreateTunedModelRequest, + dict, + ], +) +def test_create_tuned_model_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request_init["tuned_model"] = { + "tuned_model_source": { + "tuned_model": "tuned_model_value", + "base_model": "base_model_value", + }, + "base_model": "base_model_value", + "name": "name_value", + "display_name": "display_name_value", + "description": "description_value", + "temperature": 0.1198, + "top_p": 0.546, + "top_k": 541, + "state": 1, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "tuning_task": { + "start_time": {}, + "complete_time": {}, + "snapshots": [ + {"step": 444, "epoch": 527, "mean_loss": 0.961, "compute_time": {}} + ], + "training_data": { + "examples": { + "examples": [ + {"text_input": "text_input_value", "output": "output_value"} + ] + } + }, + "hyperparameters": { + "epoch_count": 1175, + "batch_size": 1052, + "learning_rate": 0.1371, + }, + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = model_service.CreateTunedModelRequest.meta.fields["tuned_model"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["tuned_model"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["tuned_model"][field])): + del request_init["tuned_model"][field][i][subfield] + else: + del request_init["tuned_model"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_tuned_model(request) + + # Establish that the response is the type that we expect. 
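+    # create_tuned_model is a long-running operation, so the test only checks the
+    # name of the wrapped Operation returned by the client.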
+ assert response.operation.name == "operations/spam" + + +def test_create_tuned_model_rest_required_fields( + request_type=model_service.CreateTunedModelRequest, +): + transport_class = transports.ModelServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_tuned_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_tuned_model._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("tuned_model_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_tuned_model(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_tuned_model_rest_unset_required_fields(): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_tuned_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(("tunedModelId",)) & set(("tunedModel",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_tuned_model_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ModelServiceRestInterceptor, "post_create_tuned_model" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_create_tuned_model" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.CreateTunedModelRequest.pb( + model_service.CreateTunedModelRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = model_service.CreateTunedModelRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_tuned_model( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_tuned_model_rest_bad_request( + transport: str = "rest", request_type=model_service.CreateTunedModelRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_tuned_model(request) + + +def test_create_tuned_model_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + tuned_model_id="tuned_model_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_tuned_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/tunedModels" % client.transport._host, args[1] + ) + + +def test_create_tuned_model_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_tuned_model( + model_service.CreateTunedModelRequest(), + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + tuned_model_id="tuned_model_id_value", + ) + + +def test_create_tuned_model_rest_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.UpdateTunedModelRequest, + dict, + ], +) +def test_update_tuned_model_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"tuned_model": {"name": "tunedModels/sample1"}} + request_init["tuned_model"] = { + "tuned_model_source": { + "tuned_model": "tuned_model_value", + "base_model": "base_model_value", + }, + "base_model": "base_model_value", + "name": "tunedModels/sample1", + "display_name": "display_name_value", + "description": "description_value", + "temperature": 0.1198, + "top_p": 0.546, + "top_k": 541, + "state": 1, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "tuning_task": { + "start_time": {}, + "complete_time": {}, + "snapshots": [ + {"step": 444, "epoch": 527, "mean_loss": 0.961, "compute_time": {}} + ], + "training_data": { + "examples": { + "examples": [ + {"text_input": "text_input_value", "output": "output_value"} + ] + } + }, + "hyperparameters": { + "epoch_count": 1175, + "batch_size": 1052, + "learning_rate": 0.1371, + }, + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = model_service.UpdateTunedModelRequest.meta.fields["tuned_model"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["tuned_model"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["tuned_model"][field])): + del request_init["tuned_model"][field][i][subfield] + else: + del request_init["tuned_model"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gag_tuned_model.TunedModel( + name="name_value", + display_name="display_name_value", + description="description_value", + temperature=0.1198, + top_p=0.546, + top_k=541, + state=gag_tuned_model.TunedModel.State.CREATING, + base_model="base_model_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gag_tuned_model.TunedModel.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_tuned_model(request) + + # Establish that the response is the type that we expect. 
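+    # update_tuned_model returns the mutated TunedModel directly (not an Operation),
+    # so each field set on the fake response is asserted below.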
+ assert isinstance(response, gag_tuned_model.TunedModel) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert math.isclose(response.temperature, 0.1198, rel_tol=1e-6) + assert math.isclose(response.top_p, 0.546, rel_tol=1e-6) + assert response.top_k == 541 + assert response.state == gag_tuned_model.TunedModel.State.CREATING + + +def test_update_tuned_model_rest_required_fields( + request_type=model_service.UpdateTunedModelRequest, +): + transport_class = transports.ModelServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_tuned_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_tuned_model._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gag_tuned_model.TunedModel() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gag_tuned_model.TunedModel.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_tuned_model(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_tuned_model_rest_unset_required_fields(): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_tuned_model._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "tunedModel", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_tuned_model_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "post_update_tuned_model" + ) as post, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_update_tuned_model" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_service.UpdateTunedModelRequest.pb( + model_service.UpdateTunedModelRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gag_tuned_model.TunedModel.to_json( + gag_tuned_model.TunedModel() + ) + + request = model_service.UpdateTunedModelRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gag_tuned_model.TunedModel() + + client.update_tuned_model( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_tuned_model_rest_bad_request( + transport: str = "rest", request_type=model_service.UpdateTunedModelRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"tuned_model": {"name": "tunedModels/sample1"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_tuned_model(request) + + +def test_update_tuned_model_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gag_tuned_model.TunedModel() + + # get arguments that satisfy an http rule for this method + sample_request = {"tuned_model": {"name": "tunedModels/sample1"}} + + # get truthy value for each flattened field + mock_args = dict( + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gag_tuned_model.TunedModel.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_tuned_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{tuned_model.name=tunedModels/*}" % client.transport._host, + args[1], + ) + + +def test_update_tuned_model_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tuned_model( + model_service.UpdateTunedModelRequest(), + tuned_model=gag_tuned_model.TunedModel( + tuned_model_source=gag_tuned_model.TunedModelSource( + tuned_model="tuned_model_value" + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_tuned_model_rest_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_service.DeleteTunedModelRequest, + dict, + ], +) +def test_delete_tuned_model_rest(request_type): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_tuned_model(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_tuned_model_rest_required_fields( + request_type=model_service.DeleteTunedModelRequest, +): + transport_class = transports.ModelServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_tuned_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_tuned_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_tuned_model(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_tuned_model_rest_unset_required_fields(): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_tuned_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_tuned_model_rest_interceptors(null_interceptor): + transport = transports.ModelServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelServiceRestInterceptor(), + ) + client = ModelServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelServiceRestInterceptor, "pre_delete_tuned_model" + ) as pre: + pre.assert_not_called() + pb_message = model_service.DeleteTunedModelRequest.pb( + model_service.DeleteTunedModelRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = model_service.DeleteTunedModelRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_tuned_model( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_tuned_model_rest_bad_request( + transport: str = "rest", request_type=model_service.DeleteTunedModelRequest +): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_tuned_model(request) + + +def test_delete_tuned_model_rest_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
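+        # delete_tuned_model returns Empty, so the fake payload is None and the
+        # encoded wire body is an empty string.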
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "tunedModels/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_tuned_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=tunedModels/*}" % client.transport._host, args[1] + ) + + +def test_delete_tuned_model_rest_flattened_error(transport: str = "rest"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tuned_model( + model_service.DeleteTunedModelRequest(), + name="name_value", + ) + + +def test_delete_tuned_model_rest_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + transports.ModelServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = ModelServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelServiceGrpcTransport, + ) + + +def test_model_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_model_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1beta.services.model_service.transports.ModelServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "get_model", + "list_models", + "get_tuned_model", + "list_tuned_models", + "create_tuned_model", + "update_tuned_model", + "delete_tuned_model", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_model_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1beta.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_model_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1beta.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport() + adc.assert_called_once() + + +def test_model_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +def test_model_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + transports.ModelServiceRestTransport, + ], +) +def test_model_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelServiceGrpcTransport, grpc_helpers), + (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_model_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport], +) +def test_model_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_model_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.ModelServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_model_service_rest_lro_client(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_model_service_host_no_port(transport_name): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_model_service_host_with_port(transport_name): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_model_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = ModelServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = ModelServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.get_model._session + session2 = client2.transport.get_model._session + assert session1 != session2 + session1 = client1.transport.list_models._session + session2 = client2.transport.list_models._session + assert session1 != session2 + session1 = client1.transport.get_tuned_model._session + session2 = client2.transport.get_tuned_model._session + assert session1 != session2 + session1 = client1.transport.list_tuned_models._session + session2 = client2.transport.list_tuned_models._session + assert session1 != session2 + session1 = client1.transport.create_tuned_model._session + session2 = 
client2.transport.create_tuned_model._session + assert session1 != session2 + session1 = client1.transport.update_tuned_model._session + session2 = client2.transport.update_tuned_model._session + assert session1 != session2 + session1 = client1.transport.delete_tuned_model._session + session2 = client2.transport.delete_tuned_model._session + assert session1 != session2 + + +def test_model_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_model_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport], +) +def test_model_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport], +) +def test_model_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_service_grpc_lro_client(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_service_grpc_lro_async_client(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_path(): + model = "squid" + expected = "models/{model}".format( + model=model, + ) + actual = ModelServiceClient.model_path(model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "model": "clam", + } + path = ModelServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_path(path) + assert expected == actual + + +def test_tuned_model_path(): + tuned_model = "whelk" + expected = "tunedModels/{tuned_model}".format( + tuned_model=tuned_model, + ) + actual = ModelServiceClient.tuned_model_path(tuned_model) + assert expected == actual + + +def test_parse_tuned_model_path(): + expected = { + "tuned_model": "octopus", + } + path = ModelServiceClient.tuned_model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_tuned_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ModelServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = ModelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = ModelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = ModelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = ModelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = ModelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format( + project=project, + ) + actual = ModelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = ModelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = ModelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = ModelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ModelServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ModelServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ModelServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (ModelServiceClient, transports.ModelServiceGrpcTransport), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_permission_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_permission_service.py new file mode 100644 index 000000000000..e48c91ae23e1 --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_permission_service.py @@ -0,0 +1,4929 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import json_format +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1beta.services.permission_service import ( + PermissionServiceAsyncClient, + PermissionServiceClient, + pagers, + transports, +) +from google.ai.generativelanguage_v1beta.types import permission as gag_permission +from google.ai.generativelanguage_v1beta.types import permission +from google.ai.generativelanguage_v1beta.types import permission_service + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PermissionServiceClient._get_default_mtls_endpoint(None) is None + assert ( + PermissionServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + PermissionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + PermissionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + PermissionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + PermissionServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (PermissionServiceClient, "grpc"), + (PermissionServiceAsyncClient, "grpc_asyncio"), + (PermissionServiceClient, "rest"), + ], +) +def test_permission_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 
"generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.PermissionServiceGrpcTransport, "grpc"), + (transports.PermissionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.PermissionServiceRestTransport, "rest"), + ], +) +def test_permission_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (PermissionServiceClient, "grpc"), + (PermissionServiceAsyncClient, "grpc_asyncio"), + (PermissionServiceClient, "rest"), + ], +) +def test_permission_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_permission_service_client_get_transport_class(): + transport = PermissionServiceClient.get_transport_class() + available_transports = [ + transports.PermissionServiceGrpcTransport, + transports.PermissionServiceRestTransport, + ] + assert transport in available_transports + + transport = PermissionServiceClient.get_transport_class("grpc") + assert transport == transports.PermissionServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PermissionServiceClient, transports.PermissionServiceGrpcTransport, "grpc"), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (PermissionServiceClient, transports.PermissionServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + PermissionServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PermissionServiceClient), +) +@mock.patch.object( + PermissionServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PermissionServiceAsyncClient), +) +def test_permission_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(PermissionServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PermissionServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + PermissionServiceClient, + transports.PermissionServiceGrpcTransport, + "grpc", + "true", + ), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + PermissionServiceClient, + transports.PermissionServiceGrpcTransport, + "grpc", + "false", + ), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + PermissionServiceClient, + transports.PermissionServiceRestTransport, + "rest", + "true", + ), + ( + PermissionServiceClient, + transports.PermissionServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + PermissionServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PermissionServiceClient), +) +@mock.patch.object( + PermissionServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PermissionServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_permission_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [PermissionServiceClient, PermissionServiceAsyncClient] +) +@mock.patch.object( + PermissionServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PermissionServiceClient), +) +@mock.patch.object( + PermissionServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PermissionServiceAsyncClient), +) +def test_permission_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PermissionServiceClient, transports.PermissionServiceGrpcTransport, "grpc"), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (PermissionServiceClient, transports.PermissionServiceRestTransport, "rest"), + ], +) +def test_permission_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + PermissionServiceClient, + transports.PermissionServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + PermissionServiceClient, + transports.PermissionServiceRestTransport, + "rest", + None, + ), + ], +) +def test_permission_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_permission_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1beta.services.permission_service.transports.PermissionServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = PermissionServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + PermissionServiceClient, + transports.PermissionServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_permission_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.CreatePermissionRequest, + dict, + ], +) +def test_create_permission(request_type, transport: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_permission.Permission( + name="name_value", + grantee_type=gag_permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=gag_permission.Permission.Role.OWNER, + ) + response = client.create_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.CreatePermissionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gag_permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == gag_permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == gag_permission.Permission.Role.OWNER + + +def test_create_permission_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + client.create_permission() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.CreatePermissionRequest() + + +@pytest.mark.asyncio +async def test_create_permission_async( + transport: str = "grpc_asyncio", + request_type=permission_service.CreatePermissionRequest, +): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_permission.Permission( + name="name_value", + grantee_type=gag_permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=gag_permission.Permission.Role.OWNER, + ) + ) + response = await client.create_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.CreatePermissionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gag_permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == gag_permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == gag_permission.Permission.Role.OWNER + + +@pytest.mark.asyncio +async def test_create_permission_async_from_dict(): + await test_create_permission_async(request_type=dict) + + +def test_create_permission_field_headers(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.CreatePermissionRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + call.return_value = gag_permission.Permission() + client.create_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_permission_field_headers_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.CreatePermissionRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_permission.Permission() + ) + await client.create_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_permission_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gag_permission.Permission() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_permission( + parent="parent_value", + permission=gag_permission.Permission(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].permission + mock_val = gag_permission.Permission(name="name_value") + assert arg == mock_val + + +def test_create_permission_flattened_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_permission( + permission_service.CreatePermissionRequest(), + parent="parent_value", + permission=gag_permission.Permission(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_permission_flattened_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_permission.Permission() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_permission.Permission() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_permission( + parent="parent_value", + permission=gag_permission.Permission(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].permission + mock_val = gag_permission.Permission(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_permission_flattened_error_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_permission( + permission_service.CreatePermissionRequest(), + parent="parent_value", + permission=gag_permission.Permission(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.GetPermissionRequest, + dict, + ], +) +def test_get_permission(request_type, transport: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = permission.Permission( + name="name_value", + grantee_type=permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=permission.Permission.Role.OWNER, + ) + response = client.get_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.GetPermissionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == permission.Permission.Role.OWNER + + +def test_get_permission_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + client.get_permission() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.GetPermissionRequest() + + +@pytest.mark.asyncio +async def test_get_permission_async( + transport: str = "grpc_asyncio", + request_type=permission_service.GetPermissionRequest, +): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission.Permission( + name="name_value", + grantee_type=permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=permission.Permission.Role.OWNER, + ) + ) + response = await client.get_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.GetPermissionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == permission.Permission.Role.OWNER + + +@pytest.mark.asyncio +async def test_get_permission_async_from_dict(): + await test_get_permission_async(request_type=dict) + + +def test_get_permission_field_headers(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.GetPermissionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
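+    # The routing header is derived from request.name and is expected to be
+    # sent as x-goog-request-params metadata on the call.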
+ with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + call.return_value = permission.Permission() + client.get_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_permission_field_headers_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.GetPermissionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission.Permission() + ) + await client.get_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_permission_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = permission.Permission() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_permission( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_permission_flattened_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_permission( + permission_service.GetPermissionRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_permission_flattened_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_permission), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = permission.Permission() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission.Permission() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_permission( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_permission_flattened_error_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_permission( + permission_service.GetPermissionRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.ListPermissionsRequest, + dict, + ], +) +def test_list_permissions(request_type, transport: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = permission_service.ListPermissionsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.ListPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPermissionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + client.list_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.ListPermissionsRequest() + + +@pytest.mark.asyncio +async def test_list_permissions_async( + transport: str = "grpc_asyncio", + request_type=permission_service.ListPermissionsRequest, +): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission_service.ListPermissionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.ListPermissionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPermissionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_permissions_async_from_dict(): + await test_list_permissions_async(request_type=dict) + + +def test_list_permissions_field_headers(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.ListPermissionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + call.return_value = permission_service.ListPermissionsResponse() + client.list_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_permissions_field_headers_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.ListPermissionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission_service.ListPermissionsResponse() + ) + await client.list_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_permissions_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = permission_service.ListPermissionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_permissions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_permissions_flattened_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_permissions( + permission_service.ListPermissionsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_permissions_flattened_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = permission_service.ListPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission_service.ListPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_permissions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_permissions_flattened_error_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_permissions( + permission_service.ListPermissionsRequest(), + parent="parent_value", + ) + + +def test_list_permissions_pager(transport_name: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + permission.Permission(), + ], + next_page_token="abc", + ), + permission_service.ListPermissionsResponse( + permissions=[], + next_page_token="def", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + ], + next_page_token="ghi", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_permissions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, permission.Permission) for i in results) + + +def test_list_permissions_pages(transport_name: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_permissions), "__call__") as call: + # Set the response to a series of pages. 
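+        # Each entry in the side_effect below is returned for one RPC; the
+        # trailing RuntimeError would only surface if the pager requested a
+        # page beyond the final response (the one without a next_page_token).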
+ call.side_effect = ( + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + permission.Permission(), + ], + next_page_token="abc", + ), + permission_service.ListPermissionsResponse( + permissions=[], + next_page_token="def", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + ], + next_page_token="ghi", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + ], + ), + RuntimeError, + ) + pages = list(client.list_permissions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_permissions_async_pager(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_permissions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + permission.Permission(), + ], + next_page_token="abc", + ), + permission_service.ListPermissionsResponse( + permissions=[], + next_page_token="def", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + ], + next_page_token="ghi", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_permissions( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, permission.Permission) for i in responses) + + +@pytest.mark.asyncio +async def test_list_permissions_async_pages(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_permissions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + permission.Permission(), + ], + next_page_token="abc", + ), + permission_service.ListPermissionsResponse( + permissions=[], + next_page_token="def", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + ], + next_page_token="ghi", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_permissions(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.UpdatePermissionRequest, + dict, + ], +) +def test_update_permission(request_type, transport: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_permission.Permission( + name="name_value", + grantee_type=gag_permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=gag_permission.Permission.Role.OWNER, + ) + response = client.update_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.UpdatePermissionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gag_permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == gag_permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == gag_permission.Permission.Role.OWNER + + +def test_update_permission_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + client.update_permission() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.UpdatePermissionRequest() + + +@pytest.mark.asyncio +async def test_update_permission_async( + transport: str = "grpc_asyncio", + request_type=permission_service.UpdatePermissionRequest, +): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_permission.Permission( + name="name_value", + grantee_type=gag_permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=gag_permission.Permission.Role.OWNER, + ) + ) + response = await client.update_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.UpdatePermissionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gag_permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == gag_permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == gag_permission.Permission.Role.OWNER + + +@pytest.mark.asyncio +async def test_update_permission_async_from_dict(): + await test_update_permission_async(request_type=dict) + + +def test_update_permission_field_headers(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.UpdatePermissionRequest() + + request.permission.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + call.return_value = gag_permission.Permission() + client.update_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "permission.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_permission_field_headers_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.UpdatePermissionRequest() + + request.permission.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_permission.Permission() + ) + await client.update_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "permission.name=name_value", + ) in kw["metadata"] + + +def test_update_permission_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_permission.Permission() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_permission( + permission=gag_permission.Permission(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].permission + mock_val = gag_permission.Permission(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_permission_flattened_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_permission( + permission_service.UpdatePermissionRequest(), + permission=gag_permission.Permission(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_permission_flattened_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gag_permission.Permission() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gag_permission.Permission() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_permission( + permission=gag_permission.Permission(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].permission + mock_val = gag_permission.Permission(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_permission_flattened_error_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_permission( + permission_service.UpdatePermissionRequest(), + permission=gag_permission.Permission(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.DeletePermissionRequest, + dict, + ], +) +def test_delete_permission(request_type, transport: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.DeletePermissionRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_permission_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + client.delete_permission() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.DeletePermissionRequest() + + +@pytest.mark.asyncio +async def test_delete_permission_async( + transport: str = "grpc_asyncio", + request_type=permission_service.DeletePermissionRequest, +): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.DeletePermissionRequest() + + # Establish that the response is the type that we expect. 
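+    # The delete RPC has no payload; the generated client surfaces the empty
+    # response to callers as None.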
+ assert response is None + + +@pytest.mark.asyncio +async def test_delete_permission_async_from_dict(): + await test_delete_permission_async(request_type=dict) + + +def test_delete_permission_field_headers(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.DeletePermissionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + call.return_value = None + client.delete_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_permission_field_headers_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.DeletePermissionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_permission(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_permission_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_permission( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_permission_flattened_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_permission( + permission_service.DeletePermissionRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_permission_flattened_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_permission), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_permission( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_permission_flattened_error_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_permission( + permission_service.DeletePermissionRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.TransferOwnershipRequest, + dict, + ], +) +def test_transfer_ownership(request_type, transport: str = "grpc"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.transfer_ownership), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = permission_service.TransferOwnershipResponse() + response = client.transfer_ownership(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.TransferOwnershipRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, permission_service.TransferOwnershipResponse) + + +def test_transfer_ownership_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.transfer_ownership), "__call__" + ) as call: + client.transfer_ownership() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.TransferOwnershipRequest() + + +@pytest.mark.asyncio +async def test_transfer_ownership_async( + transport: str = "grpc_asyncio", + request_type=permission_service.TransferOwnershipRequest, +): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.transfer_ownership), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission_service.TransferOwnershipResponse() + ) + response = await client.transfer_ownership(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == permission_service.TransferOwnershipRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, permission_service.TransferOwnershipResponse) + + +@pytest.mark.asyncio +async def test_transfer_ownership_async_from_dict(): + await test_transfer_ownership_async(request_type=dict) + + +def test_transfer_ownership_field_headers(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.TransferOwnershipRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.transfer_ownership), "__call__" + ) as call: + call.return_value = permission_service.TransferOwnershipResponse() + client.transfer_ownership(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_transfer_ownership_field_headers_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = permission_service.TransferOwnershipRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.transfer_ownership), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + permission_service.TransferOwnershipResponse() + ) + await client.transfer_ownership(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.CreatePermissionRequest, + dict, + ], +) +def test_create_permission_rest(request_type): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "tunedModels/sample1"} + request_init["permission"] = { + "name": "name_value", + "grantee_type": 1, + "email_address": "email_address_value", + "role": 1, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = permission_service.CreatePermissionRequest.meta.fields["permission"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["permission"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["permission"][field])): + del request_init["permission"][field][i][subfield] + else: + del request_init["permission"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gag_permission.Permission( + name="name_value", + grantee_type=gag_permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=gag_permission.Permission.Role.OWNER, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gag_permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_permission(request) + + # Establish that the response is the type that we expect. 
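+    # The REST transport parses the JSON payload mocked above back into a
+    # proto-plus Permission message.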
+ assert isinstance(response, gag_permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == gag_permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == gag_permission.Permission.Role.OWNER + + +def test_create_permission_rest_required_fields( + request_type=permission_service.CreatePermissionRequest, +): + transport_class = transports.PermissionServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gag_permission.Permission() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gag_permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_permission(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_permission_rest_unset_required_fields(): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_permission._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "permission", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_permission_rest_interceptors(null_interceptor): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PermissionServiceRestInterceptor(), + ) + client = PermissionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PermissionServiceRestInterceptor, "post_create_permission" + ) as post, mock.patch.object( + transports.PermissionServiceRestInterceptor, "pre_create_permission" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = permission_service.CreatePermissionRequest.pb( + permission_service.CreatePermissionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gag_permission.Permission.to_json( + gag_permission.Permission() + ) + + request = permission_service.CreatePermissionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gag_permission.Permission() + + client.create_permission( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_permission_rest_bad_request( + transport: str = "rest", request_type=permission_service.CreatePermissionRequest +): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
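+    # A 400 status code on the mocked session response is translated by
+    # google.api_core into core_exceptions.BadRequest.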
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_permission(request) + + +def test_create_permission_rest_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gag_permission.Permission() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "tunedModels/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + permission=gag_permission.Permission(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gag_permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_permission(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=tunedModels/*}/permissions" % client.transport._host, + args[1], + ) + + +def test_create_permission_rest_flattened_error(transport: str = "rest"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_permission( + permission_service.CreatePermissionRequest(), + parent="parent_value", + permission=gag_permission.Permission(name="name_value"), + ) + + +def test_create_permission_rest_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.GetPermissionRequest, + dict, + ], +) +def test_get_permission_rest(request_type): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1/permissions/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = permission.Permission( + name="name_value", + grantee_type=permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=permission.Permission.Role.OWNER, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_permission(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == permission.Permission.Role.OWNER + + +def test_get_permission_rest_required_fields( + request_type=permission_service.GetPermissionRequest, +): + transport_class = transports.PermissionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = permission.Permission() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
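+ # The stubbed transcode() result below ("v1/sample_method", GET, no body)
+ # keeps the test focused on required-field handling rather than URI template
+ # matching; only the "$alt" system parameter is expected on the wire.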
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_permission(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_permission_rest_unset_required_fields(): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_permission._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_permission_rest_interceptors(null_interceptor): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PermissionServiceRestInterceptor(), + ) + client = PermissionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PermissionServiceRestInterceptor, "post_get_permission" + ) as post, mock.patch.object( + transports.PermissionServiceRestInterceptor, "pre_get_permission" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = permission_service.GetPermissionRequest.pb( + permission_service.GetPermissionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = permission.Permission.to_json( + permission.Permission() + ) + + request = permission_service.GetPermissionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = permission.Permission() + + client.get_permission( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_permission_rest_bad_request( + transport: str = "rest", request_type=permission_service.GetPermissionRequest +): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1/permissions/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_permission(request) + + +def test_get_permission_rest_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = permission.Permission() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "tunedModels/sample1/permissions/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_permission(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=tunedModels/*/permissions/*}" % client.transport._host, + args[1], + ) + + +def test_get_permission_rest_flattened_error(transport: str = "rest"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_permission( + permission_service.GetPermissionRequest(), + name="name_value", + ) + + +def test_get_permission_rest_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.ListPermissionsRequest, + dict, + ], +) +def test_list_permissions_rest(request_type): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = permission_service.ListPermissionsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = permission_service.ListPermissionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_permissions(request) + + # Establish that the response is the type that we expect. 
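+ # list_permissions() wraps the raw ListPermissionsResponse in a
+ # ListPermissionsPager; the pager still exposes next_page_token from the
+ # underlying response, so both are asserted below.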
+ assert isinstance(response, pagers.ListPermissionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_permissions_rest_required_fields( + request_type=permission_service.ListPermissionsRequest, +): + transport_class = transports.PermissionServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_permissions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = permission_service.ListPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = permission_service.ListPermissionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_permissions_rest_unset_required_fields(): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_permissions_rest_interceptors(null_interceptor): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PermissionServiceRestInterceptor(), + ) + client = PermissionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PermissionServiceRestInterceptor, "post_list_permissions" + ) as post, mock.patch.object( + transports.PermissionServiceRestInterceptor, "pre_list_permissions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = permission_service.ListPermissionsRequest.pb( + permission_service.ListPermissionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = permission_service.ListPermissionsResponse.to_json( + permission_service.ListPermissionsResponse() + ) + + request = permission_service.ListPermissionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = permission_service.ListPermissionsResponse() + + client.list_permissions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_permissions_rest_bad_request( + transport: str = "rest", request_type=permission_service.ListPermissionsRequest +): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_permissions(request) + + +def test_list_permissions_rest_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = permission_service.ListPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "tunedModels/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = permission_service.ListPermissionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=tunedModels/*}/permissions" % client.transport._host, + args[1], + ) + + +def test_list_permissions_rest_flattened_error(transport: str = "rest"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_permissions( + permission_service.ListPermissionsRequest(), + parent="parent_value", + ) + + +def test_list_permissions_rest_pager(transport: str = "rest"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + permission.Permission(), + ], + next_page_token="abc", + ), + permission_service.ListPermissionsResponse( + permissions=[], + next_page_token="def", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + ], + next_page_token="ghi", + ), + permission_service.ListPermissionsResponse( + permissions=[ + permission.Permission(), + permission.Permission(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + permission_service.ListPermissionsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "tunedModels/sample1"} + + pager = client.list_permissions(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, permission.Permission) for i in results) + + pages = list(client.list_permissions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.UpdatePermissionRequest, + dict, + ], +) +def test_update_permission_rest(request_type): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"permission": {"name": "tunedModels/sample1/permissions/sample2"}} + request_init["permission"] = { + "name": "tunedModels/sample1/permissions/sample2", + "grantee_type": 1, + "email_address": "email_address_value", + "role": 1, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = permission_service.UpdatePermissionRequest.meta.fields["permission"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
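+ # Proto-plus message classes describe their schema via `.meta.fields`,
+ # while raw protobuf classes expose a `DESCRIPTOR`; the hasattr() check
+ # below distinguishes the two runtime representations.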
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["permission"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["permission"][field])): + del request_init["permission"][field][i][subfield] + else: + del request_init["permission"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gag_permission.Permission( + name="name_value", + grantee_type=gag_permission.Permission.GranteeType.USER, + email_address="email_address_value", + role=gag_permission.Permission.Role.OWNER, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gag_permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_permission(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gag_permission.Permission) + assert response.name == "name_value" + assert response.grantee_type == gag_permission.Permission.GranteeType.USER + assert response.email_address == "email_address_value" + assert response.role == gag_permission.Permission.Role.OWNER + + +def test_update_permission_rest_required_fields( + request_type=permission_service.UpdatePermissionRequest, +): + transport_class = transports.PermissionServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_permission._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gag_permission.Permission() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gag_permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_permission(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_permission_rest_unset_required_fields(): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_permission._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "permission", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_permission_rest_interceptors(null_interceptor): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PermissionServiceRestInterceptor(), + ) + client = PermissionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PermissionServiceRestInterceptor, "post_update_permission" + ) as post, mock.patch.object( + transports.PermissionServiceRestInterceptor, "pre_update_permission" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = permission_service.UpdatePermissionRequest.pb( + permission_service.UpdatePermissionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gag_permission.Permission.to_json( + gag_permission.Permission() + ) + + request = permission_service.UpdatePermissionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gag_permission.Permission() + + client.update_permission( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_permission_rest_bad_request( + transport: str = "rest", request_type=permission_service.UpdatePermissionRequest +): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"permission": {"name": "tunedModels/sample1/permissions/sample2"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_permission(request) + + +def test_update_permission_rest_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gag_permission.Permission() + + # get arguments that satisfy an http rule for this method + sample_request = { + "permission": {"name": "tunedModels/sample1/permissions/sample2"} + } + + # get truthy value for each flattened field + mock_args = dict( + permission=gag_permission.Permission(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gag_permission.Permission.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_permission(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{permission.name=tunedModels/*/permissions/*}" + % client.transport._host, + args[1], + ) + + +def test_update_permission_rest_flattened_error(transport: str = "rest"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_permission( + permission_service.UpdatePermissionRequest(), + permission=gag_permission.Permission(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_permission_rest_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.DeletePermissionRequest, + dict, + ], +) +def test_delete_permission_rest(request_type): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1/permissions/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_permission(request) + + # Establish that the response is the type that we expect. 
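+ # DeletePermission has an empty (google.protobuf.Empty) response, which the
+ # generated client surfaces as None, hence the empty JSON body mocked above.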
+ assert response is None + + +def test_delete_permission_rest_required_fields( + request_type=permission_service.DeletePermissionRequest, +): + transport_class = transports.PermissionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_permission._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_permission(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_permission_rest_unset_required_fields(): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_permission._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_permission_rest_interceptors(null_interceptor): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PermissionServiceRestInterceptor(), + ) + client = PermissionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PermissionServiceRestInterceptor, "pre_delete_permission" + ) as pre: + pre.assert_not_called() + pb_message = permission_service.DeletePermissionRequest.pb( + permission_service.DeletePermissionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = permission_service.DeletePermissionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_permission( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_permission_rest_bad_request( + transport: str = "rest", request_type=permission_service.DeletePermissionRequest +): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1/permissions/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_permission(request) + + +def test_delete_permission_rest_flattened(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "tunedModels/sample1/permissions/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_permission(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=tunedModels/*/permissions/*}" % client.transport._host, + args[1], + ) + + +def test_delete_permission_rest_flattened_error(transport: str = "rest"): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_permission( + permission_service.DeletePermissionRequest(), + name="name_value", + ) + + +def test_delete_permission_rest_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + permission_service.TransferOwnershipRequest, + dict, + ], +) +def test_transfer_ownership_rest(request_type): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = permission_service.TransferOwnershipResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = permission_service.TransferOwnershipResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.transfer_ownership(request) + + # Establish that the response is the type that we expect. 
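+ # The mocked TransferOwnershipResponse is left unpopulated, so only the
+ # response type is asserted here.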
+ assert isinstance(response, permission_service.TransferOwnershipResponse) + + +def test_transfer_ownership_rest_required_fields( + request_type=permission_service.TransferOwnershipRequest, +): + transport_class = transports.PermissionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request_init["email_address"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).transfer_ownership._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["emailAddress"] = "email_address_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).transfer_ownership._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "emailAddress" in jsonified_request + assert jsonified_request["emailAddress"] == "email_address_value" + + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = permission_service.TransferOwnershipResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = permission_service.TransferOwnershipResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.transfer_ownership(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_transfer_ownership_rest_unset_required_fields(): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.transfer_ownership._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "emailAddress", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_transfer_ownership_rest_interceptors(null_interceptor): + transport = transports.PermissionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PermissionServiceRestInterceptor(), + ) + client = PermissionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PermissionServiceRestInterceptor, "post_transfer_ownership" + ) as post, mock.patch.object( + transports.PermissionServiceRestInterceptor, "pre_transfer_ownership" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = permission_service.TransferOwnershipRequest.pb( + permission_service.TransferOwnershipRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + permission_service.TransferOwnershipResponse.to_json( + permission_service.TransferOwnershipResponse() + ) + ) + + request = permission_service.TransferOwnershipRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = permission_service.TransferOwnershipResponse() + + client.transfer_ownership( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_transfer_ownership_rest_bad_request( + transport: str = "rest", request_type=permission_service.TransferOwnershipRequest +): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "tunedModels/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.transfer_ownership(request) + + +def test_transfer_ownership_rest_error(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PermissionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PermissionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PermissionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.PermissionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PermissionServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PermissionServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PermissionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PermissionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PermissionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PermissionServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PermissionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PermissionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PermissionServiceGrpcTransport, + transports.PermissionServiceGrpcAsyncIOTransport, + transports.PermissionServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
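+ # google.auth.default() returns a (credentials, project_id) tuple, which is
+ # why the mock below returns (AnonymousCredentials(), None); each transport
+ # class should fall back to it exactly once when no credentials are given.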
+ with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = PermissionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PermissionServiceGrpcTransport, + ) + + +def test_permission_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PermissionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_permission_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1beta.services.permission_service.transports.PermissionServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.PermissionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_permission", + "get_permission", + "list_permissions", + "update_permission", + "delete_permission", + "transfer_ownership", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_permission_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1beta.services.permission_service.transports.PermissionServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PermissionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_permission_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1beta.services.permission_service.transports.PermissionServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PermissionServiceTransport() + adc.assert_called_once() + + +def test_permission_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
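+ # The empty default_scopes tuple asserted below reflects that this service
+ # declares no service-level OAuth scopes; scopes and quota_project_id are
+ # likewise left unset by the bare client constructor.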
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PermissionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PermissionServiceGrpcTransport, + transports.PermissionServiceGrpcAsyncIOTransport, + ], +) +def test_permission_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PermissionServiceGrpcTransport, + transports.PermissionServiceGrpcAsyncIOTransport, + transports.PermissionServiceRestTransport, + ], +) +def test_permission_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PermissionServiceGrpcTransport, grpc_helpers), + (transports.PermissionServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_permission_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PermissionServiceGrpcTransport, + transports.PermissionServiceGrpcAsyncIOTransport, + ], +) +def test_permission_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
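+ # An explicit ssl_channel_credentials argument takes precedence; the second
+ # block below covers the fallback where grpc.ssl_channel_credentials() is
+ # built from client_cert_source_for_mtls instead.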
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_permission_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.PermissionServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_permission_service_host_no_port(transport_name): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_permission_service_host_with_port(transport_name): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_permission_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = PermissionServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = PermissionServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_permission._session + session2 = client2.transport.create_permission._session + assert session1 != session2 + session1 = client1.transport.get_permission._session + session2 = client2.transport.get_permission._session + assert session1 != session2 + session1 = client1.transport.list_permissions._session + session2 = client2.transport.list_permissions._session + assert session1 != session2 + 
+    session1 = client1.transport.update_permission._session
+    session2 = client2.transport.update_permission._session
+    assert session1 != session2
+    session1 = client1.transport.delete_permission._session
+    session2 = client2.transport.delete_permission._session
+    assert session1 != session2
+    session1 = client1.transport.transfer_ownership._session
+    session2 = client2.transport.transfer_ownership._session
+    assert session1 != session2
+
+
+def test_permission_service_grpc_transport_channel():
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PermissionServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_permission_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PermissionServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.PermissionServiceGrpcTransport,
+        transports.PermissionServiceGrpcAsyncIOTransport,
+    ],
+)
+def test_permission_service_transport_channel_mtls_with_client_cert_source(
+    transport_class,
+):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize( + "transport_class", + [ + transports.PermissionServiceGrpcTransport, + transports.PermissionServiceGrpcAsyncIOTransport, + ], +) +def test_permission_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_permission_path(): + tuned_model = "squid" + permission = "clam" + expected = "tunedModels/{tuned_model}/permissions/{permission}".format( + tuned_model=tuned_model, + permission=permission, + ) + actual = PermissionServiceClient.permission_path(tuned_model, permission) + assert expected == actual + + +def test_parse_permission_path(): + expected = { + "tuned_model": "whelk", + "permission": "octopus", + } + path = PermissionServiceClient.permission_path(**expected) + + # Check that the path construction is reversible. + actual = PermissionServiceClient.parse_permission_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = PermissionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = PermissionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PermissionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = PermissionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = PermissionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PermissionServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = PermissionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = PermissionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PermissionServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format( + project=project, + ) + actual = PermissionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = PermissionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PermissionServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = PermissionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = PermissionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = PermissionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.PermissionServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.PermissionServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = PermissionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PermissionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = PermissionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (PermissionServiceClient, transports.PermissionServiceGrpcTransport), + ( + PermissionServiceAsyncClient, + transports.PermissionServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_retriever_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_retriever_service.py new file mode 100644 index 000000000000..aa722566362a --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_retriever_service.py @@ -0,0 +1,11851 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import json_format +from google.protobuf import timestamp_pb2 # type: ignore +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1beta.services.retriever_service import ( + RetrieverServiceAsyncClient, + RetrieverServiceClient, + pagers, + transports, +) +from google.ai.generativelanguage_v1beta.types import retriever, retriever_service + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RetrieverServiceClient._get_default_mtls_endpoint(None) is None + assert ( + RetrieverServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + RetrieverServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + RetrieverServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + RetrieverServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + RetrieverServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (RetrieverServiceClient, "grpc"), + (RetrieverServiceAsyncClient, "grpc_asyncio"), + (RetrieverServiceClient, "rest"), + ], +) +def test_retriever_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else 
"https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.RetrieverServiceGrpcTransport, "grpc"), + (transports.RetrieverServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.RetrieverServiceRestTransport, "rest"), + ], +) +def test_retriever_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (RetrieverServiceClient, "grpc"), + (RetrieverServiceAsyncClient, "grpc_asyncio"), + (RetrieverServiceClient, "rest"), + ], +) +def test_retriever_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_retriever_service_client_get_transport_class(): + transport = RetrieverServiceClient.get_transport_class() + available_transports = [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceRestTransport, + ] + assert transport in available_transports + + transport = RetrieverServiceClient.get_transport_class("grpc") + assert transport == transports.RetrieverServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (RetrieverServiceClient, transports.RetrieverServiceGrpcTransport, "grpc"), + ( + RetrieverServiceAsyncClient, + transports.RetrieverServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (RetrieverServiceClient, transports.RetrieverServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + RetrieverServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(RetrieverServiceClient), +) +@mock.patch.object( + RetrieverServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(RetrieverServiceAsyncClient), +) +def test_retriever_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RetrieverServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(RetrieverServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+    with mock.patch.dict(
+        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+    ):
+        with pytest.raises(ValueError):
+            client = client_class(transport=transport_name)
+
+    # Check the case quota_project_id is provided
+    options = client_options.ClientOptions(quota_project_id="octopus")
+    with mock.patch.object(transport_class, "__init__") as patched:
+        patched.return_value = None
+        client = client_class(client_options=options, transport=transport_name)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id="octopus",
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+            api_audience=None,
+        )
+    # Check the case api_audience is provided
+    options = client_options.ClientOptions(
+        api_audience="https://language.googleapis.com"
+    )
+    with mock.patch.object(transport_class, "__init__") as patched:
+        patched.return_value = None
+        client = client_class(client_options=options, transport=transport_name)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+            api_audience="https://language.googleapis.com",
+        )
+
+
+@pytest.mark.parametrize(
+    "client_class,transport_class,transport_name,use_client_cert_env",
+    [
+        (
+            RetrieverServiceClient,
+            transports.RetrieverServiceGrpcTransport,
+            "grpc",
+            "true",
+        ),
+        (
+            RetrieverServiceAsyncClient,
+            transports.RetrieverServiceGrpcAsyncIOTransport,
+            "grpc_asyncio",
+            "true",
+        ),
+        (
+            RetrieverServiceClient,
+            transports.RetrieverServiceGrpcTransport,
+            "grpc",
+            "false",
+        ),
+        (
+            RetrieverServiceAsyncClient,
+            transports.RetrieverServiceGrpcAsyncIOTransport,
+            "grpc_asyncio",
+            "false",
+        ),
+        (
+            RetrieverServiceClient,
+            transports.RetrieverServiceRestTransport,
+            "rest",
+            "true",
+        ),
+        (
+            RetrieverServiceClient,
+            transports.RetrieverServiceRestTransport,
+            "rest",
+            "false",
+        ),
+    ],
+)
+@mock.patch.object(
+    RetrieverServiceClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(RetrieverServiceClient),
+)
+@mock.patch.object(
+    RetrieverServiceAsyncClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(RetrieverServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_retriever_service_client_mtls_env_auto(
+    client_class, transport_class, transport_name, use_client_cert_env
+):
+    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+    # Check the case client_cert_source is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [RetrieverServiceClient, RetrieverServiceAsyncClient] +) +@mock.patch.object( + RetrieverServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(RetrieverServiceClient), +) +@mock.patch.object( + RetrieverServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(RetrieverServiceAsyncClient), +) +def test_retriever_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (RetrieverServiceClient, transports.RetrieverServiceGrpcTransport, "grpc"), + ( + RetrieverServiceAsyncClient, + transports.RetrieverServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (RetrieverServiceClient, transports.RetrieverServiceRestTransport, "rest"), + ], +) +def test_retriever_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + RetrieverServiceClient, + transports.RetrieverServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + RetrieverServiceAsyncClient, + transports.RetrieverServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + RetrieverServiceClient, + transports.RetrieverServiceRestTransport, + "rest", + None, + ), + ], +) +def test_retriever_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_retriever_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1beta.services.retriever_service.transports.RetrieverServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = RetrieverServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + RetrieverServiceClient, + transports.RetrieverServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + RetrieverServiceAsyncClient, + transports.RetrieverServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_retriever_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.CreateCorpusRequest, + dict, + ], +) +def test_create_corpus(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + response = client.create_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_create_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_corpus), "__call__") as call: + client.create_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateCorpusRequest() + + +@pytest.mark.asyncio +async def test_create_corpus_async( + transport: str = "grpc_asyncio", request_type=retriever_service.CreateCorpusRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_corpus), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.create_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_create_corpus_async_from_dict(): + await test_create_corpus_async(request_type=dict) + + +def test_create_corpus_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_corpus( + corpus=retriever.Corpus(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].corpus + mock_val = retriever.Corpus(name="name_value") + assert arg == mock_val + + +def test_create_corpus_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_corpus( + retriever_service.CreateCorpusRequest(), + corpus=retriever.Corpus(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_corpus_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Corpus()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_corpus( + corpus=retriever.Corpus(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].corpus + mock_val = retriever.Corpus(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_corpus_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_corpus( + retriever_service.CreateCorpusRequest(), + corpus=retriever.Corpus(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.GetCorpusRequest, + dict, + ], +) +def test_get_corpus(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + response = client.get_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_get_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + client.get_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetCorpusRequest() + + +@pytest.mark.asyncio +async def test_get_corpus_async( + transport: str = "grpc_asyncio", request_type=retriever_service.GetCorpusRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.get_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_get_corpus_async_from_dict(): + await test_get_corpus_async(request_type=dict) + + +def test_get_corpus_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = retriever_service.GetCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + call.return_value = retriever.Corpus() + client.get_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_corpus_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.GetCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Corpus()) + await client.get_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_corpus_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_corpus_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_corpus( + retriever_service.GetCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_corpus_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Corpus()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_corpus_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_corpus( + retriever_service.GetCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.UpdateCorpusRequest, + dict, + ], +) +def test_update_corpus(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + response = client.update_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_update_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + client.update_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateCorpusRequest() + + +@pytest.mark.asyncio +async def test_update_corpus_async( + transport: str = "grpc_asyncio", request_type=retriever_service.UpdateCorpusRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.update_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateCorpusRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_update_corpus_async_from_dict(): + await test_update_corpus_async(request_type=dict) + + +def test_update_corpus_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.UpdateCorpusRequest() + + request.corpus.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + call.return_value = retriever.Corpus() + client.update_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "corpus.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_corpus_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.UpdateCorpusRequest() + + request.corpus.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Corpus()) + await client.update_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "corpus.name=name_value", + ) in kw["metadata"] + + +def test_update_corpus_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_corpus( + corpus=retriever.Corpus(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].corpus + mock_val = retriever.Corpus(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_corpus_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_corpus( + retriever_service.UpdateCorpusRequest(), + corpus=retriever.Corpus(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_corpus_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Corpus() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Corpus()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_corpus( + corpus=retriever.Corpus(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].corpus + mock_val = retriever.Corpus(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_corpus_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_corpus( + retriever_service.UpdateCorpusRequest(), + corpus=retriever.Corpus(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.DeleteCorpusRequest, + dict, + ], +) +def test_delete_corpus(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteCorpusRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + client.delete_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteCorpusRequest() + + +@pytest.mark.asyncio +async def test_delete_corpus_async( + transport: str = "grpc_asyncio", request_type=retriever_service.DeleteCorpusRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteCorpusRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_corpus_async_from_dict(): + await test_delete_corpus_async(request_type=dict) + + +def test_delete_corpus_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.DeleteCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + call.return_value = None + client.delete_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_corpus_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.DeleteCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_corpus_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_corpus_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_corpus( + retriever_service.DeleteCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_corpus_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_corpus_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_corpus( + retriever_service.DeleteCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.ListCorporaRequest, + dict, + ], +) +def test_list_corpora(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_corpora), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.ListCorporaResponse( + next_page_token="next_page_token_value", + ) + response = client.list_corpora(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListCorporaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCorporaPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_corpora_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_corpora), "__call__") as call: + client.list_corpora() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListCorporaRequest() + + +@pytest.mark.asyncio +async def test_list_corpora_async( + transport: str = "grpc_asyncio", request_type=retriever_service.ListCorporaRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_corpora), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListCorporaResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_corpora(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListCorporaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCorporaAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_corpora_async_from_dict(): + await test_list_corpora_async(request_type=dict) + + +def test_list_corpora_pager(transport_name: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_corpora), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + retriever.Corpus(), + ], + next_page_token="abc", + ), + retriever_service.ListCorporaResponse( + corpora=[], + next_page_token="def", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + ], + next_page_token="ghi", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_corpora(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, retriever.Corpus) for i in results) + + +def test_list_corpora_pages(transport_name: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_corpora), "__call__") as call: + # Set the response to a series of pages. 
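+ # Each call to the mocked RPC consumes the next response in this sequence;
+ # the trailing RuntimeError would only be hit if the pager asked for a page
+ # beyond the final, token-less response.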
+ call.side_effect = ( + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + retriever.Corpus(), + ], + next_page_token="abc", + ), + retriever_service.ListCorporaResponse( + corpora=[], + next_page_token="def", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + ], + next_page_token="ghi", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + ], + ), + RuntimeError, + ) + pages = list(client.list_corpora(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_corpora_async_pager(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_corpora), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + retriever.Corpus(), + ], + next_page_token="abc", + ), + retriever_service.ListCorporaResponse( + corpora=[], + next_page_token="def", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + ], + next_page_token="ghi", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_corpora( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, retriever.Corpus) for i in responses) + + +@pytest.mark.asyncio +async def test_list_corpora_async_pages(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_corpora), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + retriever.Corpus(), + ], + next_page_token="abc", + ), + retriever_service.ListCorporaResponse( + corpora=[], + next_page_token="def", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + ], + next_page_token="ghi", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_corpora(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.QueryCorpusRequest, + dict, + ], +) +def test_query_corpus(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.QueryCorpusResponse() + response = client.query_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.QueryCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.QueryCorpusResponse) + + +def test_query_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_corpus), "__call__") as call: + client.query_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.QueryCorpusRequest() + + +@pytest.mark.asyncio +async def test_query_corpus_async( + transport: str = "grpc_asyncio", request_type=retriever_service.QueryCorpusRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.QueryCorpusResponse() + ) + response = await client.query_corpus(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.QueryCorpusRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.QueryCorpusResponse) + + +@pytest.mark.asyncio +async def test_query_corpus_async_from_dict(): + await test_query_corpus_async(request_type=dict) + + +def test_query_corpus_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.QueryCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_corpus), "__call__") as call: + call.return_value = retriever_service.QueryCorpusResponse() + client.query_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_corpus_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.QueryCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_corpus), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.QueryCorpusResponse() + ) + await client.query_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.CreateDocumentRequest, + dict, + ], +) +def test_create_document(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document( + name="name_value", + display_name="display_name_value", + ) + response = client.create_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateDocumentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_create_document_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_document), "__call__") as call: + client.create_document() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateDocumentRequest() + + +@pytest.mark.asyncio +async def test_create_document_async( + transport: str = "grpc_asyncio", + request_type=retriever_service.CreateDocumentRequest, +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Document( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.create_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateDocumentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_create_document_async_from_dict(): + await test_create_document_async(request_type=dict) + + +def test_create_document_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.CreateDocumentRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_document), "__call__") as call: + call.return_value = retriever.Document() + client.create_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_document_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.CreateDocumentRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
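+ # FakeUnaryUnaryCall wraps the canned response so it can be awaited like a
+ # real grpc.aio unary-unary call.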
+ with mock.patch.object(type(client.transport.create_document), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Document()) + await client.create_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_document_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_document( + parent="parent_value", + document=retriever.Document(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].document + mock_val = retriever.Document(name="name_value") + assert arg == mock_val + + +def test_create_document_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_document( + retriever_service.CreateDocumentRequest(), + parent="parent_value", + document=retriever.Document(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_document_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Document()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_document( + parent="parent_value", + document=retriever.Document(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].document + mock_val = retriever.Document(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_document_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_document( + retriever_service.CreateDocumentRequest(), + parent="parent_value", + document=retriever.Document(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.GetDocumentRequest, + dict, + ], +) +def test_get_document(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document( + name="name_value", + display_name="display_name_value", + ) + response = client.get_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetDocumentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_get_document_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_document), "__call__") as call: + client.get_document() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetDocumentRequest() + + +@pytest.mark.asyncio +async def test_get_document_async( + transport: str = "grpc_asyncio", request_type=retriever_service.GetDocumentRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Document( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.get_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetDocumentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_get_document_async_from_dict(): + await test_get_document_async(request_type=dict) + + +def test_get_document_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.GetDocumentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_document), "__call__") as call: + call.return_value = retriever.Document() + client.get_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_document_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.GetDocumentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_document), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Document()) + await client.get_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_document_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_document( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_document_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_document( + retriever_service.GetDocumentRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_document_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Document()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_document( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_document_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_document( + retriever_service.GetDocumentRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.UpdateDocumentRequest, + dict, + ], +) +def test_update_document(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document( + name="name_value", + display_name="display_name_value", + ) + response = client.update_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateDocumentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_update_document_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_document), "__call__") as call: + client.update_document() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateDocumentRequest() + + +@pytest.mark.asyncio +async def test_update_document_async( + transport: str = "grpc_asyncio", + request_type=retriever_service.UpdateDocumentRequest, +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Document( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.update_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateDocumentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_update_document_async_from_dict(): + await test_update_document_async(request_type=dict) + + +def test_update_document_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.UpdateDocumentRequest() + + request.document.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_document), "__call__") as call: + call.return_value = retriever.Document() + client.update_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "document.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_document_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.UpdateDocumentRequest() + + request.document.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_document), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Document()) + await client.update_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "document.name=name_value", + ) in kw["metadata"] + + +def test_update_document_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_document( + document=retriever.Document(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
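+ # The flattened arguments are folded into a single UpdateDocumentRequest,
+ # so the assertions below read the fields back off args[0].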
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].document + mock_val = retriever.Document(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_document_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_document( + retriever_service.UpdateDocumentRequest(), + document=retriever.Document(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_document_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Document() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Document()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_document( + document=retriever.Document(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].document + mock_val = retriever.Document(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_document_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_document( + retriever_service.UpdateDocumentRequest(), + document=retriever.Document(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.DeleteDocumentRequest, + dict, + ], +) +def test_delete_document(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteDocumentRequest() + + # Establish that the response is the type that we expect. 
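+ # DeleteDocument has an Empty response, which the client surfaces as None.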
+ assert response is None + + +def test_delete_document_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + client.delete_document() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteDocumentRequest() + + +@pytest.mark.asyncio +async def test_delete_document_async( + transport: str = "grpc_asyncio", + request_type=retriever_service.DeleteDocumentRequest, +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteDocumentRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_document_async_from_dict(): + await test_delete_document_async(request_type=dict) + + +def test_delete_document_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.DeleteDocumentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + call.return_value = None + client.delete_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_document_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.DeleteDocumentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_document(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_document_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_document( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_document_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_document( + retriever_service.DeleteDocumentRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_document_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_document( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_document_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_document( + retriever_service.DeleteDocumentRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.ListDocumentsRequest, + dict, + ], +) +def test_list_documents(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = retriever_service.ListDocumentsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_documents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListDocumentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDocumentsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_documents_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + client.list_documents() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListDocumentsRequest() + + +@pytest.mark.asyncio +async def test_list_documents_async( + transport: str = "grpc_asyncio", request_type=retriever_service.ListDocumentsRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListDocumentsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_documents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListDocumentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDocumentsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_documents_async_from_dict(): + await test_list_documents_async(request_type=dict) + + +def test_list_documents_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.ListDocumentsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + call.return_value = retriever_service.ListDocumentsResponse() + client.list_documents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
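+ # Routing information travels as the x-goog-request-params metadata entry
+ # rather than in the request body; here it is derived from request.parent.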
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_documents_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.ListDocumentsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListDocumentsResponse() + ) + await client.list_documents(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_documents_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.ListDocumentsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_documents( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_documents_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_documents( + retriever_service.ListDocumentsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_documents_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.ListDocumentsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListDocumentsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_documents( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_documents_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_documents( + retriever_service.ListDocumentsRequest(), + parent="parent_value", + ) + + +def test_list_documents_pager(transport_name: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + retriever.Document(), + ], + next_page_token="abc", + ), + retriever_service.ListDocumentsResponse( + documents=[], + next_page_token="def", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + ], + next_page_token="ghi", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_documents(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, retriever.Document) for i in results) + + +def test_list_documents_pages(transport_name: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_documents), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + retriever.Document(), + ], + next_page_token="abc", + ), + retriever_service.ListDocumentsResponse( + documents=[], + next_page_token="def", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + ], + next_page_token="ghi", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + ], + ), + RuntimeError, + ) + pages = list(client.list_documents(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_documents_async_pager(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_documents), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + retriever.Document(), + ], + next_page_token="abc", + ), + retriever_service.ListDocumentsResponse( + documents=[], + next_page_token="def", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + ], + next_page_token="ghi", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_documents( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, retriever.Document) for i in responses) + + +@pytest.mark.asyncio +async def test_list_documents_async_pages(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_documents), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + retriever.Document(), + ], + next_page_token="abc", + ), + retriever_service.ListDocumentsResponse( + documents=[], + next_page_token="def", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + ], + next_page_token="ghi", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_documents(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.QueryDocumentRequest, + dict, + ], +) +def test_query_document(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.QueryDocumentResponse() + response = client.query_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.QueryDocumentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.QueryDocumentResponse) + + +def test_query_document_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
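+    # The stub should still receive a default-constructed QueryDocumentRequest.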
+ client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_document), "__call__") as call: + client.query_document() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.QueryDocumentRequest() + + +@pytest.mark.asyncio +async def test_query_document_async( + transport: str = "grpc_asyncio", request_type=retriever_service.QueryDocumentRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_document), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.QueryDocumentResponse() + ) + response = await client.query_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.QueryDocumentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.QueryDocumentResponse) + + +@pytest.mark.asyncio +async def test_query_document_async_from_dict(): + await test_query_document_async(request_type=dict) + + +def test_query_document_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.QueryDocumentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_document), "__call__") as call: + call.return_value = retriever_service.QueryDocumentResponse() + client.query_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_document_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.QueryDocumentRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_document), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.QueryDocumentResponse() + ) + await client.query_document(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.CreateChunkRequest, + dict, + ], +) +def test_create_chunk(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + response = client.create_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateChunkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +def test_create_chunk_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + client.create_chunk() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateChunkRequest() + + +@pytest.mark.asyncio +async def test_create_chunk_async( + transport: str = "grpc_asyncio", request_type=retriever_service.CreateChunkRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + ) + response = await client.create_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.CreateChunkRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +@pytest.mark.asyncio +async def test_create_chunk_async_from_dict(): + await test_create_chunk_async(request_type=dict) + + +def test_create_chunk_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.CreateChunkRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + call.return_value = retriever.Chunk() + client.create_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_chunk_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.CreateChunkRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Chunk()) + await client.create_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_chunk_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_chunk( + parent="parent_value", + chunk=retriever.Chunk(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].chunk + mock_val = retriever.Chunk(name="name_value") + assert arg == mock_val + + +def test_create_chunk_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
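+    # The generated client rejects this combination with a ValueError before
+    # any RPC is attempted.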
+ with pytest.raises(ValueError): + client.create_chunk( + retriever_service.CreateChunkRequest(), + parent="parent_value", + chunk=retriever.Chunk(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_chunk_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Chunk()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_chunk( + parent="parent_value", + chunk=retriever.Chunk(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].chunk + mock_val = retriever.Chunk(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_chunk_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_chunk( + retriever_service.CreateChunkRequest(), + parent="parent_value", + chunk=retriever.Chunk(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.BatchCreateChunksRequest, + dict, + ], +) +def test_batch_create_chunks(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_chunks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.BatchCreateChunksResponse() + response = client.batch_create_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchCreateChunksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.BatchCreateChunksResponse) + + +def test_batch_create_chunks_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
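+    # Patching __call__ on the transport method's type routes the invocation
+    # to the mock, so no real RPC is issued.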
+ with mock.patch.object( + type(client.transport.batch_create_chunks), "__call__" + ) as call: + client.batch_create_chunks() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchCreateChunksRequest() + + +@pytest.mark.asyncio +async def test_batch_create_chunks_async( + transport: str = "grpc_asyncio", + request_type=retriever_service.BatchCreateChunksRequest, +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_chunks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.BatchCreateChunksResponse() + ) + response = await client.batch_create_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchCreateChunksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.BatchCreateChunksResponse) + + +@pytest.mark.asyncio +async def test_batch_create_chunks_async_from_dict(): + await test_batch_create_chunks_async(request_type=dict) + + +def test_batch_create_chunks_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.BatchCreateChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_chunks), "__call__" + ) as call: + call.return_value = retriever_service.BatchCreateChunksResponse() + client.batch_create_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_create_chunks_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.BatchCreateChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_chunks), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.BatchCreateChunksResponse() + ) + await client.batch_create_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.GetChunkRequest, + dict, + ], +) +def test_get_chunk(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + response = client.get_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetChunkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +def test_get_chunk_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + client.get_chunk() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetChunkRequest() + + +@pytest.mark.asyncio +async def test_get_chunk_async( + transport: str = "grpc_asyncio", request_type=retriever_service.GetChunkRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + ) + response = await client.get_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.GetChunkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +@pytest.mark.asyncio +async def test_get_chunk_async_from_dict(): + await test_get_chunk_async(request_type=dict) + + +def test_get_chunk_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = retriever_service.GetChunkRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + call.return_value = retriever.Chunk() + client.get_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_chunk_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.GetChunkRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Chunk()) + await client.get_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_chunk_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_chunk( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_chunk_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_chunk( + retriever_service.GetChunkRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_chunk_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Chunk()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_chunk( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_chunk_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_chunk( + retriever_service.GetChunkRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.UpdateChunkRequest, + dict, + ], +) +def test_update_chunk(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + response = client.update_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateChunkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +def test_update_chunk_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + client.update_chunk() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateChunkRequest() + + +@pytest.mark.asyncio +async def test_update_chunk_async( + transport: str = "grpc_asyncio", request_type=retriever_service.UpdateChunkRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + ) + response = await client.update_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.UpdateChunkRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +@pytest.mark.asyncio +async def test_update_chunk_async_from_dict(): + await test_update_chunk_async(request_type=dict) + + +def test_update_chunk_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.UpdateChunkRequest() + + request.chunk.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + call.return_value = retriever.Chunk() + client.update_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "chunk.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_chunk_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.UpdateChunkRequest() + + request.chunk.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Chunk()) + await client.update_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "chunk.name=name_value", + ) in kw["metadata"] + + +def test_update_chunk_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_chunk( + chunk=retriever.Chunk(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].chunk + mock_val = retriever.Chunk(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_chunk_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_chunk( + retriever_service.UpdateChunkRequest(), + chunk=retriever.Chunk(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_chunk_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever.Chunk() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(retriever.Chunk()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_chunk( + chunk=retriever.Chunk(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].chunk + mock_val = retriever.Chunk(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_chunk_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_chunk( + retriever_service.UpdateChunkRequest(), + chunk=retriever.Chunk(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.BatchUpdateChunksRequest, + dict, + ], +) +def test_batch_update_chunks(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_update_chunks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.BatchUpdateChunksResponse() + response = client.batch_update_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchUpdateChunksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.BatchUpdateChunksResponse) + + +def test_batch_update_chunks_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_update_chunks), "__call__" + ) as call: + client.batch_update_chunks() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchUpdateChunksRequest() + + +@pytest.mark.asyncio +async def test_batch_update_chunks_async( + transport: str = "grpc_asyncio", + request_type=retriever_service.BatchUpdateChunksRequest, +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_update_chunks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.BatchUpdateChunksResponse() + ) + response = await client.batch_update_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchUpdateChunksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.BatchUpdateChunksResponse) + + +@pytest.mark.asyncio +async def test_batch_update_chunks_async_from_dict(): + await test_batch_update_chunks_async(request_type=dict) + + +def test_batch_update_chunks_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.BatchUpdateChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_update_chunks), "__call__" + ) as call: + call.return_value = retriever_service.BatchUpdateChunksResponse() + client.batch_update_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_update_chunks_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.BatchUpdateChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_update_chunks), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.BatchUpdateChunksResponse() + ) + await client.batch_update_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
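+    # The routing header is supplied to the stub via the `metadata` kwarg as an
+    # ("x-goog-request-params", ...) tuple.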
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.DeleteChunkRequest, + dict, + ], +) +def test_delete_chunk(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteChunkRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_chunk_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + client.delete_chunk() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteChunkRequest() + + +@pytest.mark.asyncio +async def test_delete_chunk_async( + transport: str = "grpc_asyncio", request_type=retriever_service.DeleteChunkRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.DeleteChunkRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_chunk_async_from_dict(): + await test_delete_chunk_async(request_type=dict) + + +def test_delete_chunk_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.DeleteChunkRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + call.return_value = None + client.delete_chunk(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_chunk_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.DeleteChunkRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_chunk(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_chunk_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_chunk( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_chunk_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_chunk( + retriever_service.DeleteChunkRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_chunk_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_chunk), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_chunk( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_chunk_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_chunk( + retriever_service.DeleteChunkRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.BatchDeleteChunksRequest, + dict, + ], +) +def test_batch_delete_chunks(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_delete_chunks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.batch_delete_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchDeleteChunksRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_batch_delete_chunks_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_delete_chunks), "__call__" + ) as call: + client.batch_delete_chunks() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchDeleteChunksRequest() + + +@pytest.mark.asyncio +async def test_batch_delete_chunks_async( + transport: str = "grpc_asyncio", + request_type=retriever_service.BatchDeleteChunksRequest, +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_delete_chunks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.batch_delete_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.BatchDeleteChunksRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_batch_delete_chunks_async_from_dict(): + await test_batch_delete_chunks_async(request_type=dict) + + +def test_batch_delete_chunks_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.BatchDeleteChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_delete_chunks), "__call__" + ) as call: + call.return_value = None + client.batch_delete_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_delete_chunks_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.BatchDeleteChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_delete_chunks), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.batch_delete_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.ListChunksRequest, + dict, + ], +) +def test_list_chunks(request_type, transport: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.ListChunksResponse( + next_page_token="next_page_token_value", + ) + response = client.list_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListChunksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListChunksPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_chunks_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + client.list_chunks() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListChunksRequest() + + +@pytest.mark.asyncio +async def test_list_chunks_async( + transport: str = "grpc_asyncio", request_type=retriever_service.ListChunksRequest +): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListChunksResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == retriever_service.ListChunksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListChunksAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_chunks_async_from_dict(): + await test_list_chunks_async(request_type=dict) + + +def test_list_chunks_field_headers(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.ListChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + call.return_value = retriever_service.ListChunksResponse() + client.list_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_chunks_field_headers_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = retriever_service.ListChunksRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListChunksResponse() + ) + await client.list_chunks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_chunks_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.ListChunksResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_chunks( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_chunks_flattened_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_chunks( + retriever_service.ListChunksRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_chunks_flattened_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = retriever_service.ListChunksResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + retriever_service.ListChunksResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_chunks( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_chunks_flattened_error_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_chunks( + retriever_service.ListChunksRequest(), + parent="parent_value", + ) + + +def test_list_chunks_pager(transport_name: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + # Set the response to a series of pages. 
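+ # The mock queues four pages holding 3, 0, 1 and 2 chunks; the trailing RuntimeError makes any extra, unexpected page request fail loudly.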
+ call.side_effect = ( + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + retriever.Chunk(), + ], + next_page_token="abc", + ), + retriever_service.ListChunksResponse( + chunks=[], + next_page_token="def", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + ], + next_page_token="ghi", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_chunks(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, retriever.Chunk) for i in results) + + +def test_list_chunks_pages(transport_name: str = "grpc"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_chunks), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + retriever.Chunk(), + ], + next_page_token="abc", + ), + retriever_service.ListChunksResponse( + chunks=[], + next_page_token="def", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + ], + next_page_token="ghi", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + ], + ), + RuntimeError, + ) + pages = list(client.list_chunks(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_chunks_async_pager(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_chunks), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + retriever.Chunk(), + ], + next_page_token="abc", + ), + retriever_service.ListChunksResponse( + chunks=[], + next_page_token="def", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + ], + next_page_token="ghi", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_chunks( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, retriever.Chunk) for i in responses) + + +@pytest.mark.asyncio +async def test_list_chunks_async_pages(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_chunks), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + retriever.Chunk(), + ], + next_page_token="abc", + ), + retriever_service.ListChunksResponse( + chunks=[], + next_page_token="def", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + ], + next_page_token="ghi", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_chunks(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.CreateCorpusRequest, + dict, + ], +) +def test_create_corpus_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request_init["corpus"] = { + "name": "name_value", + "display_name": "display_name_value", + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = retriever_service.CreateCorpusRequest.meta.fields["corpus"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
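+ # The check below treats a field type without a DESCRIPTOR attribute as a proto-plus message (fields read from .meta.fields) and one with DESCRIPTOR as raw protobuf (fields read from .DESCRIPTOR.fields).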
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["corpus"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["corpus"][field])): + del request_init["corpus"][field][i][subfield] + else: + del request_init["corpus"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_create_corpus_rest_required_fields( + request_type=retriever_service.CreateCorpusRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
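+ # transcode() is stubbed out with a fixed uri/method and the whole request as both body and query_params, so the test only asserts on the serialized params handed to Session.request.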
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_corpus(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_corpus_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_corpus._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("corpus",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_corpus_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_create_corpus" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_create_corpus" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.CreateCorpusRequest.pb( + retriever_service.CreateCorpusRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Corpus.to_json(retriever.Corpus()) + + request = retriever_service.CreateCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Corpus() + + client.create_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_corpus_rest_bad_request( + transport: str = "rest", request_type=retriever_service.CreateCorpusRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_corpus(request) + + +def test_create_corpus_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + corpus=retriever.Corpus(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/corpora" % client.transport._host, args[1] + ) + + +def test_create_corpus_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_corpus( + retriever_service.CreateCorpusRequest(), + corpus=retriever.Corpus(name="name_value"), + ) + + +def test_create_corpus_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.GetCorpusRequest, + dict, + ], +) +def test_get_corpus_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_get_corpus_rest_required_fields( + request_type=retriever_service.GetCorpusRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_corpus(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_corpus_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_corpus._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_corpus_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_get_corpus" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_get_corpus" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.GetCorpusRequest.pb( + retriever_service.GetCorpusRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Corpus.to_json(retriever.Corpus()) + + request = retriever_service.GetCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Corpus() + + client.get_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_corpus_rest_bad_request( + transport: str = "rest", request_type=retriever_service.GetCorpusRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_corpus(request) + + +def test_get_corpus_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
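+ # The flattened-call test below verifies the outgoing request URI against the "/v1beta/{name=corpora/*}" path template instead of inspecting a request object.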
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "corpora/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=corpora/*}" % client.transport._host, args[1] + ) + + +def test_get_corpus_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_corpus( + retriever_service.GetCorpusRequest(), + name="name_value", + ) + + +def test_get_corpus_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.UpdateCorpusRequest, + dict, + ], +) +def test_update_corpus_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"corpus": {"name": "corpora/sample1"}} + request_init["corpus"] = { + "name": "corpora/sample1", + "display_name": "display_name_value", + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = retriever_service.UpdateCorpusRequest.meta.fields["corpus"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["corpus"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["corpus"][field])): + del request_init["corpus"][field][i][subfield] + else: + del request_init["corpus"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Corpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_update_corpus_rest_required_fields( + request_type=retriever_service.UpdateCorpusRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_corpus._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_corpus(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_corpus_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_corpus._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "corpus", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_corpus_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_update_corpus" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_update_corpus" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.UpdateCorpusRequest.pb( + retriever_service.UpdateCorpusRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Corpus.to_json(retriever.Corpus()) + + request = retriever_service.UpdateCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Corpus() + + client.update_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_corpus_rest_bad_request( + transport: str = "rest", request_type=retriever_service.UpdateCorpusRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"corpus": {"name": "corpora/sample1"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_corpus(request) + + +def test_update_corpus_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Corpus() + + # get arguments that satisfy an http rule for this method + sample_request = {"corpus": {"name": "corpora/sample1"}} + + # get truthy value for each flattened field + mock_args = dict( + corpus=retriever.Corpus(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Corpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{corpus.name=corpora/*}" % client.transport._host, args[1] + ) + + +def test_update_corpus_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_corpus( + retriever_service.UpdateCorpusRequest(), + corpus=retriever.Corpus(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_corpus_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.DeleteCorpusRequest, + dict, + ], +) +def test_delete_corpus_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_corpus_rest_required_fields( + request_type=retriever_service.DeleteCorpusRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_corpus._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("force",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_corpus(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_corpus_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_corpus._get_unset_required_fields({}) + assert set(unset_fields) == (set(("force",)) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_corpus_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_delete_corpus" + ) as pre: + pre.assert_not_called() + pb_message = retriever_service.DeleteCorpusRequest.pb( + retriever_service.DeleteCorpusRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = retriever_service.DeleteCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_corpus_rest_bad_request( + transport: str = "rest", request_type=retriever_service.DeleteCorpusRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_corpus(request) + + +def test_delete_corpus_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
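+ # DeleteCorpus carries no response payload, so the fake HTTP body is an empty string and the designated return value is None.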
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "corpora/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=corpora/*}" % client.transport._host, args[1] + ) + + +def test_delete_corpus_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_corpus( + retriever_service.DeleteCorpusRequest(), + name="name_value", + ) + + +def test_delete_corpus_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.ListCorporaRequest, + dict, + ], +) +def test_list_corpora_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListCorporaResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.ListCorporaResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_corpora(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListCorporaPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_corpora_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_list_corpora" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_list_corpora" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.ListCorporaRequest.pb( + retriever_service.ListCorporaRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.ListCorporaResponse.to_json( + retriever_service.ListCorporaResponse() + ) + + request = retriever_service.ListCorporaRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.ListCorporaResponse() + + client.list_corpora( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_corpora_rest_bad_request( + transport: str = "rest", request_type=retriever_service.ListCorporaRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_corpora(request) + + +def test_list_corpora_rest_pager(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + retriever.Corpus(), + ], + next_page_token="abc", + ), + retriever_service.ListCorporaResponse( + corpora=[], + next_page_token="def", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + ], + next_page_token="ghi", + ), + retriever_service.ListCorporaResponse( + corpora=[ + retriever.Corpus(), + retriever.Corpus(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + retriever_service.ListCorporaResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list_corpora(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, retriever.Corpus) for i in results) + + pages = list(client.list_corpora(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.QueryCorpusRequest, + dict, + ], +) +def test_query_corpus_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.QueryCorpusResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.QueryCorpusResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.query_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever_service.QueryCorpusResponse) + + +def test_query_corpus_rest_required_fields( + request_type=retriever_service.QueryCorpusRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request_init["query"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).query_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["query"] = "query_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).query_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "query" in jsonified_request + assert jsonified_request["query"] == "query_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever_service.QueryCorpusResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever_service.QueryCorpusResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.query_corpus(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_query_corpus_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.query_corpus._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "query", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_query_corpus_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_query_corpus" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_query_corpus" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.QueryCorpusRequest.pb( + retriever_service.QueryCorpusRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.QueryCorpusResponse.to_json( + retriever_service.QueryCorpusResponse() + ) + + request = retriever_service.QueryCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.QueryCorpusResponse() + + client.query_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_query_corpus_rest_bad_request( + transport: str = "rest", request_type=retriever_service.QueryCorpusRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.query_corpus(request) + + +def test_query_corpus_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.CreateDocumentRequest, + dict, + ], +) +def test_create_document_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1"} + request_init["document"] = { + "name": "name_value", + "display_name": "display_name_value", + "custom_metadata": [ + { + "string_value": "string_value_value", + "string_list_value": {"values": ["values_value1", "values_value2"]}, + "numeric_value": 0.1391, + "key": "key_value", + } + ], + "update_time": {"seconds": 751, "nanos": 543}, + "create_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = retriever_service.CreateDocumentRequest.meta.fields["document"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["document"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["document"][field])): + del request_init["document"][field][i][subfield] + else: + del request_init["document"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Document( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_document(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_create_document_rest_required_fields( + request_type=retriever_service.CreateDocumentRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Document() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_document(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_document_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_document._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "document", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_document_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_create_document" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_create_document" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.CreateDocumentRequest.pb( + retriever_service.CreateDocumentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Document.to_json(retriever.Document()) + + request = retriever_service.CreateDocumentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Document() + + client.create_document( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_document_rest_bad_request( + transport: str = "rest", request_type=retriever_service.CreateDocumentRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_document(request) + + +def test_create_document_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Document() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "corpora/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + document=retriever.Document(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_document(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=corpora/*}/documents" % client.transport._host, args[1] + ) + + +def test_create_document_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_document( + retriever_service.CreateDocumentRequest(), + parent="parent_value", + document=retriever.Document(name="name_value"), + ) + + +def test_create_document_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.GetDocumentRequest, + dict, + ], +) +def test_get_document_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Document( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_document(request) + + # Establish that the response is the type that we expect. 
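+    # (The name and display_name checks that follow also confirm the mocked JSON
+    # payload was deserialized back into a proto-plus retriever.Document.)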
+ assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_get_document_rest_required_fields( + request_type=retriever_service.GetDocumentRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Document() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_document(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_document_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_document._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_document_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_get_document" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_get_document" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.GetDocumentRequest.pb( + retriever_service.GetDocumentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Document.to_json(retriever.Document()) + + request = retriever_service.GetDocumentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Document() + + client.get_document( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_document_rest_bad_request( + transport: str = "rest", request_type=retriever_service.GetDocumentRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_document(request) + + +def test_get_document_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Document() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "corpora/sample1/documents/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_document(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=corpora/*/documents/*}" % client.transport._host, args[1] + ) + + +def test_get_document_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_document( + retriever_service.GetDocumentRequest(), + name="name_value", + ) + + +def test_get_document_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.UpdateDocumentRequest, + dict, + ], +) +def test_update_document_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"document": {"name": "corpora/sample1/documents/sample2"}} + request_init["document"] = { + "name": "corpora/sample1/documents/sample2", + "display_name": "display_name_value", + "custom_metadata": [ + { + "string_value": "string_value_value", + "string_list_value": {"values": ["values_value1", "values_value2"]}, + "numeric_value": 0.1391, + "key": "key_value", + } + ], + "update_time": {"seconds": 751, "nanos": 543}, + "create_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = retriever_service.UpdateDocumentRequest.meta.fields["document"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. 
+ # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["document"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["document"][field])): + del request_init["document"][field][i][subfield] + else: + del request_init["document"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Document( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_document(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Document) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_update_document_rest_required_fields( + request_type=retriever_service.UpdateDocumentRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_document._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Document() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_document(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_document_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_document._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "document", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_document_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_update_document" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_update_document" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.UpdateDocumentRequest.pb( + retriever_service.UpdateDocumentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Document.to_json(retriever.Document()) + + request = retriever_service.UpdateDocumentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Document() + + client.update_document( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_document_rest_bad_request( + transport: str = "rest", request_type=retriever_service.UpdateDocumentRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"document": {"name": "corpora/sample1/documents/sample2"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_document(request) + + +def test_update_document_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Document() + + # get arguments that satisfy an http rule for this method + sample_request = {"document": {"name": "corpora/sample1/documents/sample2"}} + + # get truthy value for each flattened field + mock_args = dict( + document=retriever.Document(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Document.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_document(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{document.name=corpora/*/documents/*}" % client.transport._host, + args[1], + ) + + +def test_update_document_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_document( + retriever_service.UpdateDocumentRequest(), + document=retriever.Document(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_document_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.DeleteDocumentRequest, + dict, + ], +) +def test_delete_document_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_document(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_document_rest_required_fields( + request_type=retriever_service.DeleteDocumentRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_document._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("force",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_document(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_document_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_document._get_unset_required_fields({}) + assert set(unset_fields) == (set(("force",)) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_document_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_delete_document" + ) as pre: + pre.assert_not_called() + pb_message = retriever_service.DeleteDocumentRequest.pb( + retriever_service.DeleteDocumentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = retriever_service.DeleteDocumentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_document( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_document_rest_bad_request( + transport: str = "rest", request_type=retriever_service.DeleteDocumentRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_document(request) + + +def test_delete_document_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
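+        # DeleteDocument has no response payload in these tests, so None and an
+        # empty JSON body stand in for the response here.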
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "corpora/sample1/documents/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_document(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=corpora/*/documents/*}" % client.transport._host, args[1] + ) + + +def test_delete_document_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_document( + retriever_service.DeleteDocumentRequest(), + name="name_value", + ) + + +def test_delete_document_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.ListDocumentsRequest, + dict, + ], +) +def test_list_documents_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListDocumentsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.ListDocumentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_documents(request) + + # Establish that the response is the type that we expect. 
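+    # list_documents wraps the raw ListDocumentsResponse in a ListDocumentsPager,
+    # which is what the assertions below check.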
+ assert isinstance(response, pagers.ListDocumentsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_documents_rest_required_fields( + request_type=retriever_service.ListDocumentsRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_documents._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_documents._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListDocumentsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever_service.ListDocumentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_documents(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_documents_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_documents._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_documents_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_list_documents" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_list_documents" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.ListDocumentsRequest.pb( + retriever_service.ListDocumentsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.ListDocumentsResponse.to_json( + retriever_service.ListDocumentsResponse() + ) + + request = retriever_service.ListDocumentsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.ListDocumentsResponse() + + client.list_documents( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_documents_rest_bad_request( + transport: str = "rest", request_type=retriever_service.ListDocumentsRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_documents(request) + + +def test_list_documents_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListDocumentsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "corpora/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.ListDocumentsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_documents(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=corpora/*}/documents" % client.transport._host, args[1] + ) + + +def test_list_documents_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_documents( + retriever_service.ListDocumentsRequest(), + parent="parent_value", + ) + + +def test_list_documents_rest_pager(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + retriever.Document(), + ], + next_page_token="abc", + ), + retriever_service.ListDocumentsResponse( + documents=[], + next_page_token="def", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + ], + next_page_token="ghi", + ), + retriever_service.ListDocumentsResponse( + documents=[ + retriever.Document(), + retriever.Document(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + retriever_service.ListDocumentsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "corpora/sample1"} + + pager = client.list_documents(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, retriever.Document) for i in results) + + pages = list(client.list_documents(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.QueryDocumentRequest, + dict, + ], +) +def test_query_document_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.QueryDocumentResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.QueryDocumentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.query_document(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever_service.QueryDocumentResponse) + + +def test_query_document_rest_required_fields( + request_type=retriever_service.QueryDocumentRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request_init["query"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).query_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["query"] = "query_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).query_document._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "query" in jsonified_request + assert jsonified_request["query"] == "query_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever_service.QueryDocumentResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever_service.QueryDocumentResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.query_document(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_query_document_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.query_document._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "query", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_query_document_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_query_document" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_query_document" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.QueryDocumentRequest.pb( + retriever_service.QueryDocumentRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.QueryDocumentResponse.to_json( + retriever_service.QueryDocumentResponse() + ) + + request = retriever_service.QueryDocumentRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.QueryDocumentResponse() + + client.query_document( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_query_document_rest_bad_request( + transport: str = "rest", request_type=retriever_service.QueryDocumentRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.query_document(request) + + +def test_query_document_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.CreateChunkRequest, + dict, + ], +) +def test_create_chunk_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request_init["chunk"] = { + "name": "name_value", + "data": {"string_value": "string_value_value"}, + "custom_metadata": [ + { + "string_value": "string_value_value", + "string_list_value": {"values": ["values_value1", "values_value2"]}, + "numeric_value": 0.1391, + "key": "key_value", + } + ], + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "state": 1, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = retriever_service.CreateChunkRequest.meta.fields["chunk"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["chunk"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["chunk"][field])): + del request_init["chunk"][field][i][subfield] + else: + del request_init["chunk"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_chunk(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +def test_create_chunk_rest_required_fields( + request_type=retriever_service.CreateChunkRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_chunk(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_chunk_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_chunk._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "chunk", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_chunk_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_create_chunk" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_create_chunk" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.CreateChunkRequest.pb( + retriever_service.CreateChunkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Chunk.to_json(retriever.Chunk()) + + request = retriever_service.CreateChunkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Chunk() + + client.create_chunk( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_chunk_rest_bad_request( + transport: str = "rest", request_type=retriever_service.CreateChunkRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_chunk(request) + + +def test_create_chunk_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "corpora/sample1/documents/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + chunk=retriever.Chunk(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_chunk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=corpora/*/documents/*}/chunks" % client.transport._host, + args[1], + ) + + +def test_create_chunk_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_chunk( + retriever_service.CreateChunkRequest(), + parent="parent_value", + chunk=retriever.Chunk(name="name_value"), + ) + + +def test_create_chunk_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.BatchCreateChunksRequest, + dict, + ], +) +def test_batch_create_chunks_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.BatchCreateChunksResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.BatchCreateChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_create_chunks(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever_service.BatchCreateChunksResponse) + + +def test_batch_create_chunks_rest_required_fields( + request_type=retriever_service.BatchCreateChunksRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_create_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_create_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever_service.BatchCreateChunksResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever_service.BatchCreateChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.batch_create_chunks(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_create_chunks_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_create_chunks._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("requests",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_create_chunks_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_batch_create_chunks" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_batch_create_chunks" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.BatchCreateChunksRequest.pb( + retriever_service.BatchCreateChunksRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.BatchCreateChunksResponse.to_json( + retriever_service.BatchCreateChunksResponse() + ) + + request = retriever_service.BatchCreateChunksRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.BatchCreateChunksResponse() + + client.batch_create_chunks( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_create_chunks_rest_bad_request( + transport: str = "rest", request_type=retriever_service.BatchCreateChunksRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_create_chunks(request) + + +def test_batch_create_chunks_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.GetChunkRequest, + dict, + ], +) +def test_get_chunk_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_chunk(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +def test_get_chunk_rest_required_fields(request_type=retriever_service.GetChunkRequest): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_chunk(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_chunk_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_chunk._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_chunk_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_get_chunk" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_get_chunk" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.GetChunkRequest.pb( + retriever_service.GetChunkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Chunk.to_json(retriever.Chunk()) + + request = retriever_service.GetChunkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Chunk() + + client.get_chunk( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_chunk_rest_bad_request( + transport: str = "rest", request_type=retriever_service.GetChunkRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_chunk(request) + + +def test_get_chunk_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_chunk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=corpora/*/documents/*/chunks/*}" % client.transport._host, + args[1], + ) + + +def test_get_chunk_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_chunk( + retriever_service.GetChunkRequest(), + name="name_value", + ) + + +def test_get_chunk_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.UpdateChunkRequest, + dict, + ], +) +def test_update_chunk_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "chunk": {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + } + request_init["chunk"] = { + "name": "corpora/sample1/documents/sample2/chunks/sample3", + "data": {"string_value": "string_value_value"}, + "custom_metadata": [ + { + "string_value": "string_value_value", + "string_list_value": {"values": ["values_value1", "values_value2"]}, + "numeric_value": 0.1391, + "key": "key_value", + } + ], + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "state": 1, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = retriever_service.UpdateChunkRequest.meta.fields["chunk"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["chunk"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["chunk"][field])): + del request_init["chunk"][field][i][subfield] + else: + del request_init["chunk"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk( + name="name_value", + state=retriever.Chunk.State.STATE_PENDING_PROCESSING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_chunk(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, retriever.Chunk) + assert response.name == "name_value" + assert response.state == retriever.Chunk.State.STATE_PENDING_PROCESSING + + +def test_update_chunk_rest_required_fields( + request_type=retriever_service.UpdateChunkRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_chunk._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_chunk(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_chunk_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_chunk._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "chunk", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_chunk_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_update_chunk" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_update_chunk" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.UpdateChunkRequest.pb( + retriever_service.UpdateChunkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever.Chunk.to_json(retriever.Chunk()) + + request = retriever_service.UpdateChunkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever.Chunk() + + client.update_chunk( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_chunk_rest_bad_request( + transport: str = "rest", request_type=retriever_service.UpdateChunkRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "chunk": {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_chunk(request) + + +def test_update_chunk_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever.Chunk() + + # get arguments that satisfy an http rule for this method + sample_request = { + "chunk": {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + chunk=retriever.Chunk(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever.Chunk.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_chunk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{chunk.name=corpora/*/documents/*/chunks/*}" + % client.transport._host, + args[1], + ) + + +def test_update_chunk_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_chunk( + retriever_service.UpdateChunkRequest(), + chunk=retriever.Chunk(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_chunk_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.BatchUpdateChunksRequest, + dict, + ], +) +def test_batch_update_chunks_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = retriever_service.BatchUpdateChunksResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.BatchUpdateChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_update_chunks(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, retriever_service.BatchUpdateChunksResponse) + + +def test_batch_update_chunks_rest_required_fields( + request_type=retriever_service.BatchUpdateChunksRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_update_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_update_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever_service.BatchUpdateChunksResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever_service.BatchUpdateChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.batch_update_chunks(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_update_chunks_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_update_chunks._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("requests",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_update_chunks_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_batch_update_chunks" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_batch_update_chunks" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.BatchUpdateChunksRequest.pb( + retriever_service.BatchUpdateChunksRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.BatchUpdateChunksResponse.to_json( + retriever_service.BatchUpdateChunksResponse() + ) + + request = retriever_service.BatchUpdateChunksRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.BatchUpdateChunksResponse() + + client.batch_update_chunks( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_update_chunks_rest_bad_request( + transport: str = "rest", request_type=retriever_service.BatchUpdateChunksRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_update_chunks(request) + + +def test_batch_update_chunks_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.DeleteChunkRequest, + dict, + ], +) +def test_delete_chunk_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_chunk(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_chunk_rest_required_fields( + request_type=retriever_service.DeleteChunkRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_chunk._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_chunk(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_chunk_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_chunk._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_chunk_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_delete_chunk" + ) as pre: + pre.assert_not_called() + pb_message = retriever_service.DeleteChunkRequest.pb( + retriever_service.DeleteChunkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = retriever_service.DeleteChunkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_chunk( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_chunk_rest_bad_request( + transport: str = "rest", request_type=retriever_service.DeleteChunkRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_chunk(request) + + +def test_delete_chunk_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "corpora/sample1/documents/sample2/chunks/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_chunk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{name=corpora/*/documents/*/chunks/*}" % client.transport._host, + args[1], + ) + + +def test_delete_chunk_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_chunk( + retriever_service.DeleteChunkRequest(), + name="name_value", + ) + + +def test_delete_chunk_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.BatchDeleteChunksRequest, + dict, + ], +) +def test_batch_delete_chunks_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_delete_chunks(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_batch_delete_chunks_rest_required_fields( + request_type=retriever_service.BatchDeleteChunksRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_delete_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_delete_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.batch_delete_chunks(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_delete_chunks_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_delete_chunks._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("requests",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_delete_chunks_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_batch_delete_chunks" + ) as pre: + pre.assert_not_called() + pb_message = retriever_service.BatchDeleteChunksRequest.pb( + retriever_service.BatchDeleteChunksRequest() + ) + transcode.return_value = { + "method": "post", + "uri": 
"my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = retriever_service.BatchDeleteChunksRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.batch_delete_chunks( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_batch_delete_chunks_rest_bad_request( + transport: str = "rest", request_type=retriever_service.BatchDeleteChunksRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_delete_chunks(request) + + +def test_batch_delete_chunks_rest_error(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + retriever_service.ListChunksRequest, + dict, + ], +) +def test_list_chunks_rest(request_type): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListChunksResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.ListChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_chunks(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListChunksPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_chunks_rest_required_fields( + request_type=retriever_service.ListChunksRequest, +): + transport_class = transports.RetrieverServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_chunks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_chunks._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListChunksResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = retriever_service.ListChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_chunks(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_chunks_rest_unset_required_fields(): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_chunks._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_chunks_rest_interceptors(null_interceptor): + transport = transports.RetrieverServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.RetrieverServiceRestInterceptor(), + ) + client = RetrieverServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "post_list_chunks" + ) as post, mock.patch.object( + transports.RetrieverServiceRestInterceptor, "pre_list_chunks" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = retriever_service.ListChunksRequest.pb( + retriever_service.ListChunksRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = retriever_service.ListChunksResponse.to_json( + retriever_service.ListChunksResponse() + ) + + request = retriever_service.ListChunksRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = retriever_service.ListChunksResponse() + + client.list_chunks( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_chunks_rest_bad_request( + transport: str = "rest", request_type=retriever_service.ListChunksRequest +): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "corpora/sample1/documents/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
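+ # An HTTP 400 from the underlying session should surface as core_exceptions.BadRequest.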
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_chunks(request) + + +def test_list_chunks_rest_flattened(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = retriever_service.ListChunksResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "corpora/sample1/documents/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = retriever_service.ListChunksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_chunks(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{parent=corpora/*/documents/*}/chunks" % client.transport._host, + args[1], + ) + + +def test_list_chunks_rest_flattened_error(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_chunks( + retriever_service.ListChunksRequest(), + parent="parent_value", + ) + + +def test_list_chunks_rest_pager(transport: str = "rest"): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + retriever.Chunk(), + ], + next_page_token="abc", + ), + retriever_service.ListChunksResponse( + chunks=[], + next_page_token="def", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + ], + next_page_token="ghi", + ), + retriever_service.ListChunksResponse( + chunks=[ + retriever.Chunk(), + retriever.Chunk(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + retriever_service.ListChunksResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "corpora/sample1/documents/sample2"} + + pager = client.list_chunks(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, retriever.Chunk) for i in results) + + pages = list(client.list_chunks(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RetrieverServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RetrieverServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RetrieverServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.RetrieverServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = RetrieverServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = RetrieverServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RetrieverServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RetrieverServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RetrieverServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RetrieverServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RetrieverServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.RetrieverServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceGrpcAsyncIOTransport, + transports.RetrieverServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = RetrieverServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.RetrieverServiceGrpcTransport, + ) + + +def test_retriever_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RetrieverServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_retriever_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1beta.services.retriever_service.transports.RetrieverServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.RetrieverServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
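+ # The base transport only declares the API surface; each method below must be overridden by a concrete transport.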
+ methods = ( + "create_corpus", + "get_corpus", + "update_corpus", + "delete_corpus", + "list_corpora", + "query_corpus", + "create_document", + "get_document", + "update_document", + "delete_document", + "list_documents", + "query_document", + "create_chunk", + "batch_create_chunks", + "get_chunk", + "update_chunk", + "batch_update_chunks", + "delete_chunk", + "batch_delete_chunks", + "list_chunks", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_retriever_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1beta.services.retriever_service.transports.RetrieverServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RetrieverServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_retriever_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1beta.services.retriever_service.transports.RetrieverServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RetrieverServiceTransport() + adc.assert_called_once() + + +def test_retriever_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RetrieverServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceGrpcAsyncIOTransport, + ], +) +def test_retriever_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
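+ # google.auth.default() is patched so the test does not require real application default credentials.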
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceGrpcAsyncIOTransport, + transports.RetrieverServiceRestTransport, + ], +) +def test_retriever_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.RetrieverServiceGrpcTransport, grpc_helpers), + (transports.RetrieverServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_retriever_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceGrpcAsyncIOTransport, + ], +) +def test_retriever_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
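+ # The cert/key pair returned by the callback should be handed to grpc.ssl_channel_credentials.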
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_retriever_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.RetrieverServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_retriever_service_host_no_port(transport_name): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_retriever_service_host_with_port(transport_name): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_retriever_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = RetrieverServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = RetrieverServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_corpus._session + session2 = client2.transport.create_corpus._session + assert session1 != session2 + session1 = client1.transport.get_corpus._session + session2 = client2.transport.get_corpus._session + assert session1 != session2 + session1 = client1.transport.update_corpus._session + session2 = client2.transport.update_corpus._session + assert session1 != session2 + session1 = client1.transport.delete_corpus._session + session2 = client2.transport.delete_corpus._session + assert session1 != session2 + session1 = client1.transport.list_corpora._session + session2 = client2.transport.list_corpora._session + assert session1 != session2 + session1 = client1.transport.query_corpus._session + session2 = client2.transport.query_corpus._session + assert session1 != session2 + session1 = client1.transport.create_document._session + session2 = client2.transport.create_document._session + assert session1 != session2 + session1 = client1.transport.get_document._session + session2 = client2.transport.get_document._session + assert session1 != session2 
+ session1 = client1.transport.update_document._session + session2 = client2.transport.update_document._session + assert session1 != session2 + session1 = client1.transport.delete_document._session + session2 = client2.transport.delete_document._session + assert session1 != session2 + session1 = client1.transport.list_documents._session + session2 = client2.transport.list_documents._session + assert session1 != session2 + session1 = client1.transport.query_document._session + session2 = client2.transport.query_document._session + assert session1 != session2 + session1 = client1.transport.create_chunk._session + session2 = client2.transport.create_chunk._session + assert session1 != session2 + session1 = client1.transport.batch_create_chunks._session + session2 = client2.transport.batch_create_chunks._session + assert session1 != session2 + session1 = client1.transport.get_chunk._session + session2 = client2.transport.get_chunk._session + assert session1 != session2 + session1 = client1.transport.update_chunk._session + session2 = client2.transport.update_chunk._session + assert session1 != session2 + session1 = client1.transport.batch_update_chunks._session + session2 = client2.transport.batch_update_chunks._session + assert session1 != session2 + session1 = client1.transport.delete_chunk._session + session2 = client2.transport.delete_chunk._session + assert session1 != session2 + session1 = client1.transport.batch_delete_chunks._session + session2 = client2.transport.batch_delete_chunks._session + assert session1 != session2 + session1 = client1.transport.list_chunks._session + session2 = client2.transport.list_chunks._session + assert session1 != session2 + + +def test_retriever_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.RetrieverServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_retriever_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.RetrieverServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
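+ # Until then, supplying api_mtls_endpoint/client_cert_source should emit a DeprecationWarning, as asserted below.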
+@pytest.mark.parametrize( + "transport_class", + [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceGrpcAsyncIOTransport, + ], +) +def test_retriever_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.RetrieverServiceGrpcTransport, + transports.RetrieverServiceGrpcAsyncIOTransport, + ], +) +def test_retriever_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_chunk_path(): + corpus = "squid" + document = "clam" + chunk = "whelk" + expected = "corpora/{corpus}/documents/{document}/chunks/{chunk}".format( + corpus=corpus, + document=document, + chunk=chunk, + ) + actual = RetrieverServiceClient.chunk_path(corpus, document, chunk) + assert expected == actual + + +def test_parse_chunk_path(): + expected = { + "corpus": "octopus", + "document": "oyster", + "chunk": "nudibranch", + } + path = RetrieverServiceClient.chunk_path(**expected) + + # Check that the path construction is reversible. 
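+ # parse_chunk_path should recover exactly the keyword arguments given to chunk_path.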
+ actual = RetrieverServiceClient.parse_chunk_path(path) + assert expected == actual + + +def test_corpus_path(): + corpus = "cuttlefish" + expected = "corpora/{corpus}".format( + corpus=corpus, + ) + actual = RetrieverServiceClient.corpus_path(corpus) + assert expected == actual + + +def test_parse_corpus_path(): + expected = { + "corpus": "mussel", + } + path = RetrieverServiceClient.corpus_path(**expected) + + # Check that the path construction is reversible. + actual = RetrieverServiceClient.parse_corpus_path(path) + assert expected == actual + + +def test_document_path(): + corpus = "winkle" + document = "nautilus" + expected = "corpora/{corpus}/documents/{document}".format( + corpus=corpus, + document=document, + ) + actual = RetrieverServiceClient.document_path(corpus, document) + assert expected == actual + + +def test_parse_document_path(): + expected = { + "corpus": "scallop", + "document": "abalone", + } + path = RetrieverServiceClient.document_path(**expected) + + # Check that the path construction is reversible. + actual = RetrieverServiceClient.parse_document_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = RetrieverServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RetrieverServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RetrieverServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = RetrieverServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RetrieverServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RetrieverServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = RetrieverServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RetrieverServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RetrieverServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format( + project=project, + ) + actual = RetrieverServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RetrieverServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RetrieverServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = RetrieverServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RetrieverServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RetrieverServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.RetrieverServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.RetrieverServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = RetrieverServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = RetrieverServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = RetrieverServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
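+ # Exiting the client context manager should close the underlying transport.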
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (RetrieverServiceClient, transports.RetrieverServiceGrpcTransport), + (RetrieverServiceAsyncClient, transports.RetrieverServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_text_service.py b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_text_service.py new file mode 100644 index 000000000000..17dbc3d8a503 --- /dev/null +++ b/packages/google-ai-generativelanguage/tests/unit/gapic/generativelanguage_v1beta/test_text_service.py @@ -0,0 +1,3544 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +from collections.abc import Iterable +import json +import math + +from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +import google.auth +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import json_format +import grpc +from grpc.experimental import aio +from proto.marshal.rules import wrappers +from proto.marshal.rules.dates import DurationRule, TimestampRule +import pytest +from requests import PreparedRequest, Request, Response +from requests.sessions import Session + +from google.ai.generativelanguage_v1beta.services.text_service import ( + TextServiceAsyncClient, + TextServiceClient, + transports, +) +from google.ai.generativelanguage_v1beta.types import safety, text_service + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TextServiceClient._get_default_mtls_endpoint(None) is None + assert ( + TextServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + ) + assert ( + TextServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + TextServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + TextServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert TextServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (TextServiceClient, "grpc"), + (TextServiceAsyncClient, "grpc_asyncio"), + (TextServiceClient, "rest"), + ], +) +def test_text_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.TextServiceGrpcTransport, "grpc"), + (transports.TextServiceGrpcAsyncIOTransport, 
"grpc_asyncio"), + (transports.TextServiceRestTransport, "rest"), + ], +) +def test_text_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (TextServiceClient, "grpc"), + (TextServiceAsyncClient, "grpc_asyncio"), + (TextServiceClient, "rest"), + ], +) +def test_text_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +def test_text_service_client_get_transport_class(): + transport = TextServiceClient.get_transport_class() + available_transports = [ + transports.TextServiceGrpcTransport, + transports.TextServiceRestTransport, + ] + assert transport in available_transports + + transport = TextServiceClient.get_transport_class("grpc") + assert transport == transports.TextServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (TextServiceClient, transports.TextServiceGrpcTransport, "grpc"), + ( + TextServiceAsyncClient, + transports.TextServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (TextServiceClient, transports.TextServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + TextServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextServiceClient) +) +@mock.patch.object( + TextServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TextServiceAsyncClient), +) +def test_text_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(TextServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TextServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
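+ # Values other than "true" or "false" should be rejected with a ValueError.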
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (TextServiceClient, transports.TextServiceGrpcTransport, "grpc", "true"), + ( + TextServiceAsyncClient, + transports.TextServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (TextServiceClient, transports.TextServiceGrpcTransport, "grpc", "false"), + ( + TextServiceAsyncClient, + transports.TextServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + (TextServiceClient, transports.TextServiceRestTransport, "rest", "true"), + (TextServiceClient, transports.TextServiceRestTransport, "rest", "false"), + ], +) +@mock.patch.object( + TextServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextServiceClient) +) +@mock.patch.object( + TextServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TextServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_text_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [TextServiceClient, TextServiceAsyncClient]) +@mock.patch.object( + TextServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextServiceClient) +) +@mock.patch.object( + TextServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TextServiceAsyncClient), +) +def test_text_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (TextServiceClient, transports.TextServiceGrpcTransport, "grpc"), + ( + TextServiceAsyncClient, + transports.TextServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (TextServiceClient, transports.TextServiceRestTransport, "rest"), + ], +) +def test_text_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
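+ # User-supplied scopes should be forwarded unchanged to the transport constructor.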
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + (TextServiceClient, transports.TextServiceGrpcTransport, "grpc", grpc_helpers), + ( + TextServiceAsyncClient, + transports.TextServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (TextServiceClient, transports.TextServiceRestTransport, "rest", None), + ], +) +def test_text_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_text_service_client_client_options_from_dict(): + with mock.patch( + "google.ai.generativelanguage_v1beta.services.text_service.transports.TextServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = TextServiceClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + (TextServiceClient, transports.TextServiceGrpcTransport, "grpc", grpc_helpers), + ( + TextServiceAsyncClient, + transports.TextServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_text_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=(), + scopes=None, + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.GenerateTextRequest, + dict, + ], +) +def test_generate_text(request_type, transport: str = "grpc"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.GenerateTextResponse() + response = client.generate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.GenerateTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.GenerateTextResponse) + + +def test_generate_text_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + client.generate_text() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.GenerateTextRequest() + + +@pytest.mark.asyncio +async def test_generate_text_async( + transport: str = "grpc_asyncio", request_type=text_service.GenerateTextRequest +): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.GenerateTextResponse() + ) + response = await client.generate_text(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.GenerateTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.GenerateTextResponse) + + +@pytest.mark.asyncio +async def test_generate_text_async_from_dict(): + await test_generate_text_async(request_type=dict) + + +def test_generate_text_field_headers(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.GenerateTextRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + call.return_value = text_service.GenerateTextResponse() + client.generate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_text_field_headers_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.GenerateTextRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.GenerateTextResponse() + ) + await client.generate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_generate_text_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.GenerateTextResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_text( + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + temperature=0.1198, + candidate_count=1573, + max_output_tokens=1865, + top_p=0.546, + top_k=541, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = text_service.TextPrompt(text="text_value") + assert arg == mock_val + assert math.isclose(args[0].temperature, 0.1198, rel_tol=1e-6) + arg = args[0].candidate_count + mock_val = 1573 + assert arg == mock_val + arg = args[0].max_output_tokens + mock_val = 1865 + assert arg == mock_val + assert math.isclose(args[0].top_p, 0.546, rel_tol=1e-6) + arg = args[0].top_k + mock_val = 541 + assert arg == mock_val + + +def test_generate_text_flattened_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_text( + text_service.GenerateTextRequest(), + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + temperature=0.1198, + candidate_count=1573, + max_output_tokens=1865, + top_p=0.546, + top_k=541, + ) + + +@pytest.mark.asyncio +async def test_generate_text_flattened_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.generate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.GenerateTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.GenerateTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.generate_text( + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + temperature=0.1198, + candidate_count=1573, + max_output_tokens=1865, + top_p=0.546, + top_k=541, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = text_service.TextPrompt(text="text_value") + assert arg == mock_val + assert math.isclose(args[0].temperature, 0.1198, rel_tol=1e-6) + arg = args[0].candidate_count + mock_val = 1573 + assert arg == mock_val + arg = args[0].max_output_tokens + mock_val = 1865 + assert arg == mock_val + assert math.isclose(args[0].top_p, 0.546, rel_tol=1e-6) + arg = args[0].top_k + mock_val = 541 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_generate_text_flattened_error_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.generate_text( + text_service.GenerateTextRequest(), + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + temperature=0.1198, + candidate_count=1573, + max_output_tokens=1865, + top_p=0.546, + top_k=541, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.EmbedTextRequest, + dict, + ], +) +def test_embed_text(request_type, transport: str = "grpc"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.EmbedTextResponse() + response = client.embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.EmbedTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.EmbedTextResponse) + + +def test_embed_text_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + client.embed_text() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.EmbedTextRequest() + + +@pytest.mark.asyncio +async def test_embed_text_async( + transport: str = "grpc_asyncio", request_type=text_service.EmbedTextRequest +): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.EmbedTextResponse() + ) + response = await client.embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.EmbedTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.EmbedTextResponse) + + +@pytest.mark.asyncio +async def test_embed_text_async_from_dict(): + await test_embed_text_async(request_type=dict) + + +def test_embed_text_field_headers(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.EmbedTextRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + call.return_value = text_service.EmbedTextResponse() + client.embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_embed_text_field_headers_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.EmbedTextRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.EmbedTextResponse() + ) + await client.embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_embed_text_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.EmbedTextResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.embed_text( + model="model_value", + text="text_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].text + mock_val = "text_value" + assert arg == mock_val + + +def test_embed_text_flattened_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.embed_text( + text_service.EmbedTextRequest(), + model="model_value", + text="text_value", + ) + + +@pytest.mark.asyncio +async def test_embed_text_flattened_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.EmbedTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.EmbedTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.embed_text( + model="model_value", + text="text_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].text + mock_val = "text_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_embed_text_flattened_error_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.embed_text( + text_service.EmbedTextRequest(), + model="model_value", + text="text_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.BatchEmbedTextRequest, + dict, + ], +) +def test_batch_embed_text(request_type, transport: str = "grpc"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.BatchEmbedTextResponse() + response = client.batch_embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.BatchEmbedTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.BatchEmbedTextResponse) + + +def test_batch_embed_text_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + client.batch_embed_text() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.BatchEmbedTextRequest() + + +@pytest.mark.asyncio +async def test_batch_embed_text_async( + transport: str = "grpc_asyncio", request_type=text_service.BatchEmbedTextRequest +): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.BatchEmbedTextResponse() + ) + response = await client.batch_embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.BatchEmbedTextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, text_service.BatchEmbedTextResponse) + + +@pytest.mark.asyncio +async def test_batch_embed_text_async_from_dict(): + await test_batch_embed_text_async(request_type=dict) + + +def test_batch_embed_text_field_headers(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.BatchEmbedTextRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + call.return_value = text_service.BatchEmbedTextResponse() + client.batch_embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_embed_text_field_headers_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.BatchEmbedTextRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.BatchEmbedTextResponse() + ) + await client.batch_embed_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_batch_embed_text_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.BatchEmbedTextResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_embed_text( + model="model_value", + texts=["texts_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].texts + mock_val = ["texts_value"] + assert arg == mock_val + + +def test_batch_embed_text_flattened_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_embed_text( + text_service.BatchEmbedTextRequest(), + model="model_value", + texts=["texts_value"], + ) + + +@pytest.mark.asyncio +async def test_batch_embed_text_flattened_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_embed_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.BatchEmbedTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.BatchEmbedTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_embed_text( + model="model_value", + texts=["texts_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].texts + mock_val = ["texts_value"] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_batch_embed_text_flattened_error_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_embed_text( + text_service.BatchEmbedTextRequest(), + model="model_value", + texts=["texts_value"], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.CountTextTokensRequest, + dict, + ], +) +def test_count_text_tokens(request_type, transport: str = "grpc"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.CountTextTokensResponse( + token_count=1193, + ) + response = client.count_text_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.CountTextTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.CountTextTokensResponse) + assert response.token_count == 1193 + + +def test_count_text_tokens_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + client.count_text_tokens() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.CountTextTokensRequest() + + +@pytest.mark.asyncio +async def test_count_text_tokens_async( + transport: str = "grpc_asyncio", request_type=text_service.CountTextTokensRequest +): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.CountTextTokensResponse( + token_count=1193, + ) + ) + response = await client.count_text_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == text_service.CountTextTokensRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.CountTextTokensResponse) + assert response.token_count == 1193 + + +@pytest.mark.asyncio +async def test_count_text_tokens_async_from_dict(): + await test_count_text_tokens_async(request_type=dict) + + +def test_count_text_tokens_field_headers(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.CountTextTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + call.return_value = text_service.CountTextTokensResponse() + client.count_text_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_count_text_tokens_field_headers_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = text_service.CountTextTokensRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.CountTextTokensResponse() + ) + await client.count_text_tokens(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_count_text_tokens_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.CountTextTokensResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.count_text_tokens( + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = text_service.TextPrompt(text="text_value") + assert arg == mock_val + + +def test_count_text_tokens_flattened_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_text_tokens( + text_service.CountTextTokensRequest(), + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + ) + + +@pytest.mark.asyncio +async def test_count_text_tokens_flattened_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.count_text_tokens), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = text_service.CountTextTokensResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + text_service.CountTextTokensResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.count_text_tokens( + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].prompt + mock_val = text_service.TextPrompt(text="text_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_count_text_tokens_flattened_error_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.count_text_tokens( + text_service.CountTextTokensRequest(), + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.GenerateTextRequest, + dict, + ], +) +def test_generate_text_rest(request_type): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.GenerateTextResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.GenerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_text(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.GenerateTextResponse) + + +def test_generate_text_rest_required_fields( + request_type=text_service.GenerateTextRequest, +): + transport_class = transports.TextServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = text_service.GenerateTextResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = text_service.GenerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.generate_text(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_text_rest_unset_required_fields(): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_text._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "prompt", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_text_rest_interceptors(null_interceptor): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.TextServiceRestInterceptor(), + ) + client = TextServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.TextServiceRestInterceptor, "post_generate_text" + ) as post, mock.patch.object( + transports.TextServiceRestInterceptor, "pre_generate_text" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = text_service.GenerateTextRequest.pb( + text_service.GenerateTextRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = text_service.GenerateTextResponse.to_json( + text_service.GenerateTextResponse() + ) + + request = text_service.GenerateTextRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = text_service.GenerateTextResponse() + + client.generate_text( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_text_rest_bad_request( + transport: str = "rest", request_type=text_service.GenerateTextRequest +): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_text(request) + + +def test_generate_text_rest_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.GenerateTextResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + temperature=0.1198, + candidate_count=1573, + max_output_tokens=1865, + top_p=0.546, + top_k=541, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.GenerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.generate_text(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:generateText" % client.transport._host, args[1] + ) + + +def test_generate_text_rest_flattened_error(transport: str = "rest"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_text( + text_service.GenerateTextRequest(), + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + temperature=0.1198, + candidate_count=1573, + max_output_tokens=1865, + top_p=0.546, + top_k=541, + ) + + +def test_generate_text_rest_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.EmbedTextRequest, + dict, + ], +) +def test_embed_text_rest(request_type): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = text_service.EmbedTextResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.EmbedTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.embed_text(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, text_service.EmbedTextResponse) + + +def test_embed_text_rest_required_fields(request_type=text_service.EmbedTextRequest): + transport_class = transports.TextServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).embed_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).embed_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = text_service.EmbedTextResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = text_service.EmbedTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.embed_text(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_embed_text_rest_unset_required_fields(): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.embed_text._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("model",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_embed_text_rest_interceptors(null_interceptor): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.TextServiceRestInterceptor(), + ) + client = TextServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.TextServiceRestInterceptor, "post_embed_text" + ) as post, mock.patch.object( + transports.TextServiceRestInterceptor, "pre_embed_text" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = text_service.EmbedTextRequest.pb(text_service.EmbedTextRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = text_service.EmbedTextResponse.to_json( + text_service.EmbedTextResponse() + ) + + request = text_service.EmbedTextRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = text_service.EmbedTextResponse() + + client.embed_text( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_embed_text_rest_bad_request( + transport: str = "rest", request_type=text_service.EmbedTextRequest +): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.embed_text(request) + + +def test_embed_text_rest_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.EmbedTextResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + text="text_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.EmbedTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.embed_text(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:embedText" % client.transport._host, args[1] + ) + + +def test_embed_text_rest_flattened_error(transport: str = "rest"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.embed_text( + text_service.EmbedTextRequest(), + model="model_value", + text="text_value", + ) + + +def test_embed_text_rest_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.BatchEmbedTextRequest, + dict, + ], +) +def test_batch_embed_text_rest(request_type): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.BatchEmbedTextResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.BatchEmbedTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.batch_embed_text(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, text_service.BatchEmbedTextResponse) + + +def test_batch_embed_text_rest_required_fields( + request_type=text_service.BatchEmbedTextRequest, +): + transport_class = transports.TextServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_embed_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_embed_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = text_service.BatchEmbedTextResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = text_service.BatchEmbedTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.batch_embed_text(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_embed_text_rest_unset_required_fields(): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_embed_text._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("model",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_embed_text_rest_interceptors(null_interceptor): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.TextServiceRestInterceptor(), + ) + client = TextServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.TextServiceRestInterceptor, "post_batch_embed_text" + ) as post, mock.patch.object( + transports.TextServiceRestInterceptor, "pre_batch_embed_text" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = text_service.BatchEmbedTextRequest.pb( + text_service.BatchEmbedTextRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = text_service.BatchEmbedTextResponse.to_json( + text_service.BatchEmbedTextResponse() + ) + + request = text_service.BatchEmbedTextRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = text_service.BatchEmbedTextResponse() + + client.batch_embed_text( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_embed_text_rest_bad_request( + transport: str = "rest", request_type=text_service.BatchEmbedTextRequest +): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_embed_text(request) + + +def test_batch_embed_text_rest_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.BatchEmbedTextResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + texts=["texts_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.BatchEmbedTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.batch_embed_text(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:batchEmbedText" % client.transport._host, + args[1], + ) + + +def test_batch_embed_text_rest_flattened_error(transport: str = "rest"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_embed_text( + text_service.BatchEmbedTextRequest(), + model="model_value", + texts=["texts_value"], + ) + + +def test_batch_embed_text_rest_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + text_service.CountTextTokensRequest, + dict, + ], +) +def test_count_text_tokens_rest(request_type): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.CountTextTokensResponse( + token_count=1193, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.CountTextTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.count_text_tokens(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, text_service.CountTextTokensResponse) + assert response.token_count == 1193 + + +def test_count_text_tokens_rest_required_fields( + request_type=text_service.CountTextTokensRequest, +): + transport_class = transports.TextServiceRestTransport + + request_init = {} + request_init["model"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_text_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["model"] = "model_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).count_text_tokens._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "model" in jsonified_request + assert jsonified_request["model"] == "model_value" + + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = text_service.CountTextTokensResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = text_service.CountTextTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.count_text_tokens(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_count_text_tokens_rest_unset_required_fields(): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.count_text_tokens._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "model", + "prompt", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_count_text_tokens_rest_interceptors(null_interceptor): + transport = transports.TextServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.TextServiceRestInterceptor(), + ) + client = TextServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.TextServiceRestInterceptor, "post_count_text_tokens" + ) as post, mock.patch.object( + transports.TextServiceRestInterceptor, "pre_count_text_tokens" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = text_service.CountTextTokensRequest.pb( + text_service.CountTextTokensRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = text_service.CountTextTokensResponse.to_json( + text_service.CountTextTokensResponse() + ) + + request = text_service.CountTextTokensRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = text_service.CountTextTokensResponse() + + client.count_text_tokens( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_count_text_tokens_rest_bad_request( + transport: str = "rest", request_type=text_service.CountTextTokensRequest +): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"model": "models/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.count_text_tokens(request) + + +def test_count_text_tokens_rest_flattened(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = text_service.CountTextTokensResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"model": "models/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = text_service.CountTextTokensResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.count_text_tokens(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta/{model=models/*}:countTextTokens" % client.transport._host, + args[1], + ) + + +def test_count_text_tokens_rest_flattened_error(transport: str = "rest"): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.count_text_tokens( + text_service.CountTextTokensRequest(), + model="model_value", + prompt=text_service.TextPrompt(text="text_value"), + ) + + +def test_count_text_tokens_rest_error(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TextServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TextServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TextServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.TextServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = TextServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = TextServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TextServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TextServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.TextServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TextServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.TextServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.TextServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TextServiceGrpcTransport, + transports.TextServiceGrpcAsyncIOTransport, + transports.TextServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = TextServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.TextServiceGrpcTransport, + ) + + +def test_text_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TextServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_text_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.ai.generativelanguage_v1beta.services.text_service.transports.TextServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.TextServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "generate_text", + "embed_text", + "batch_embed_text", + "count_text_tokens", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_text_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.ai.generativelanguage_v1beta.services.text_service.transports.TextServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TextServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=(), + quota_project_id="octopus", + ) + + +def test_text_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.ai.generativelanguage_v1beta.services.text_service.transports.TextServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TextServiceTransport() + adc.assert_called_once() + + +def test_text_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TextServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=(), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TextServiceGrpcTransport, + transports.TextServiceGrpcAsyncIOTransport, + ], +) +def test_text_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=(), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TextServiceGrpcTransport, + transports.TextServiceGrpcAsyncIOTransport, + transports.TextServiceRestTransport, + ], +) +def test_text_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.TextServiceGrpcTransport, grpc_helpers), + (transports.TextServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_text_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "generativelanguage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=(), + scopes=["1", "2"], + default_host="generativelanguage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.TextServiceGrpcTransport, transports.TextServiceGrpcAsyncIOTransport], +) +def test_text_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_text_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.TextServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_text_service_host_no_port(transport_name): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_text_service_host_with_port(transport_name): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="generativelanguage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "generativelanguage.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://generativelanguage.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_text_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = TextServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = TextServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.generate_text._session + session2 = client2.transport.generate_text._session + assert session1 != session2 + session1 = client1.transport.embed_text._session + session2 = client2.transport.embed_text._session + assert session1 != session2 + session1 = client1.transport.batch_embed_text._session + session2 = client2.transport.batch_embed_text._session + assert session1 != session2 + session1 = client1.transport.count_text_tokens._session + session2 = client2.transport.count_text_tokens._session + assert session1 != session2 + + +def test_text_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.TextServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_text_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.TextServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [transports.TextServiceGrpcTransport, transports.TextServiceGrpcAsyncIOTransport], +) +def test_text_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [transports.TextServiceGrpcTransport, transports.TextServiceGrpcAsyncIOTransport], +) +def test_text_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_path(): + model = "squid" + expected = "models/{model}".format( + model=model, + ) + actual = TextServiceClient.model_path(model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "model": "clam", + } + path = TextServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = TextServiceClient.parse_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = TextServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = TextServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = TextServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = TextServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = TextServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TextServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = TextServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = TextServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TextServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = TextServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = TextServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = TextServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = TextServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = TextServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TextServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.TextServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.TextServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = TextServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = TextServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = TextServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (TextServiceClient, transports.TextServiceGrpcTransport), + (TextServiceAsyncClient, transports.TextServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) From ea5da9dbd73abf805a0e7231c2f24fcf1fd9d925 Mon Sep 17 00:00:00 2001 From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com> Date: Sat, 9 Dec 2023 06:00:12 -0800 Subject: [PATCH 08/80] chore: Update release-please config files (#12108) Update release-please config files --- release-please-config.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/release-please-config.json b/release-please-config.json index 0bbbc1a98a0e..78f3df3a656f 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -6,8 +6,20 @@ "component": "google-ai-generativelanguage", "extra-files": [ "google/ai/generativelanguage/gapic_version.py", + "google/ai/generativelanguage_v1/gapic_version.py", + "google/ai/generativelanguage_v1beta/gapic_version.py", "google/ai/generativelanguage_v1beta2/gapic_version.py", "google/ai/generativelanguage_v1beta3/gapic_version.py", + { + "jsonpath": "$.clientLibrary.version", + "path": "samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json", + "type": "json" + }, + { + "jsonpath": "$.clientLibrary.version", + "path": "samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json", + "type": "json" + }, { "jsonpath": "$.clientLibrary.version", "path": "samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json", From bd7589f5b35b958af65d5f37eb9d582539f09263 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 10:11:47 -0500 Subject: [PATCH 09/80] chore: release main (#12107) :robot: I have created a release *beep* *boop* ---
google-ai-generativelanguage: 0.4.0 ## [0.4.0](https://github.com/googleapis/google-cloud-python/compare/google-ai-generativelanguage-v0.3.5...google-ai-generativelanguage-v0.4.0) (2023-12-09) ### Features * Add v1, contains only GenerativeService, nothing else ([23d8814](https://github.com/googleapis/google-cloud-python/commit/23d8814baa6288d94484d52a98714fd32755ada3)) * Add v1beta, adds GenerativeService and RetrievalService ([23d8814](https://github.com/googleapis/google-cloud-python/commit/23d8814baa6288d94484d52a98714fd32755ada3)) * Set `google.ai.generativelanguage_v1beta` as the default import ([23d8814](https://github.com/googleapis/google-cloud-python/commit/23d8814baa6288d94484d52a98714fd32755ada3))
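A minimal sketch of what the new default import means for callers (assuming `google-ai-generativelanguage>=0.4.0` is installed; the alias name `glm` is illustrative, not part of this release):

```python
# With google.ai.generativelanguage_v1beta set as the default import, the
# versionless package re-exports the v1beta surface.
from google.ai import generativelanguage as glm
from google.ai import generativelanguage_v1beta

# Both import paths should resolve to the same client class in this release.
assert glm.GenerativeServiceClient is generativelanguage_v1beta.GenerativeServiceClient
```

Code pinned to the explicit `google.ai.generativelanguage_v1beta2` or `_v1beta3` modules is unaffected; only the versionless alias moves to v1beta.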
google-analytics-data: 0.18.2 ## [0.18.2](https://github.com/googleapis/google-cloud-python/compare/google-analytics-data-v0.18.1...google-analytics-data-v0.18.2) (2023-12-09) ### Features * [google-analytics-data] add `CreateAudienceExport`, `QueryAudienceExport`, `GetAudienceExport`, `ListAudienceExports` methods to the Data API v1 beta ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) * add `sampling_metadatas` field to `ResponseMetaData` ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) * add `SamplingMetadata`, `AudienceExport`, `AudienceExportMetadata`, `AudienceDimensionValue` types ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) ### Bug Fixes * add `optional` label to `consumed`, `remaining` fields of the `QuotaStatus` type ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) ### Documentation * updated comments ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58))
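A minimal sketch of the new audience-export surface (assuming `google-analytics-data>=0.18.2`, application default credentials, and a placeholder GA4 property; the resource name below is illustrative):

```python
from google.analytics import data_v1beta

# The Data API v1beta client now exposes the audience export methods
# (CreateAudienceExport, QueryAudienceExport, GetAudienceExport, ListAudienceExports).
client = data_v1beta.BetaAnalyticsDataClient()

# "properties/123456" is a placeholder property resource name.
for audience_export in client.list_audience_exports(parent="properties/123456"):
    print(audience_export.name, audience_export.state)
```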
--- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 4 ++-- .../google-ai-generativelanguage/CHANGELOG.md | 9 +++++++++ .../ai/generativelanguage/gapic_version.py | 2 +- .../ai/generativelanguage_v1/gapic_version.py | 2 +- .../gapic_version.py | 2 +- .../gapic_version.py | 2 +- .../gapic_version.py | 2 +- ...adata_google.ai.generativelanguage.v1.json | 2 +- ...a_google.ai.generativelanguage.v1beta.json | 2 +- ..._google.ai.generativelanguage.v1beta2.json | 2 +- ..._google.ai.generativelanguage.v1beta3.json | 2 +- packages/google-analytics-data/CHANGELOG.md | 19 +++++++++++++++++++ .../google/analytics/data/gapic_version.py | 2 +- .../analytics/data_v1alpha/gapic_version.py | 2 +- .../analytics/data_v1beta/gapic_version.py | 2 +- ...etadata_google.analytics.data.v1alpha.json | 2 +- ...metadata_google.analytics.data.v1beta.json | 2 +- 17 files changed, 44 insertions(+), 16 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9a267657b196..fb98d85a5c99 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,7 +1,7 @@ { - "packages/google-ai-generativelanguage": "0.3.5", + "packages/google-ai-generativelanguage": "0.4.0", "packages/google-analytics-admin": "0.22.1", - "packages/google-analytics-data": "0.18.1", + "packages/google-analytics-data": "0.18.2", "packages/google-apps-meet": "0.1.0", "packages/google-apps-script-type": "0.3.5", "packages/google-area120-tables": "0.11.5", diff --git a/packages/google-ai-generativelanguage/CHANGELOG.md b/packages/google-ai-generativelanguage/CHANGELOG.md index 5f1b80e625f3..e2ca71ea8536 100644 --- a/packages/google-ai-generativelanguage/CHANGELOG.md +++ b/packages/google-ai-generativelanguage/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## [0.4.0](https://github.com/googleapis/google-cloud-python/compare/google-ai-generativelanguage-v0.3.5...google-ai-generativelanguage-v0.4.0) (2023-12-09) + + +### Features + +* Add v1, contains only GenerativeService, nothing else ([23d8814](https://github.com/googleapis/google-cloud-python/commit/23d8814baa6288d94484d52a98714fd32755ada3)) +* Add v1beta, adds GenerativeService and RetrievalService ([23d8814](https://github.com/googleapis/google-cloud-python/commit/23d8814baa6288d94484d52a98714fd32755ada3)) +* Set `google.ai.generativelanguage_v1beta` as the default import ([23d8814](https://github.com/googleapis/google-cloud-python/commit/23d8814baa6288d94484d52a98714fd32755ada3)) + ## [0.3.5](https://github.com/googleapis/google-cloud-python/compare/google-ai-generativelanguage-v0.3.4...google-ai-generativelanguage-v0.3.5) (2023-12-07) diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py index 360a0d13ebdd..8ac855809454 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.4.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py index 360a0d13ebdd..8ac855809454 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.4.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py index 360a0d13ebdd..8ac855809454 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.4.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py index 360a0d13ebdd..8ac855809454 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.4.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py index 360a0d13ebdd..8ac855809454 100644 --- a/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py +++ b/packages/google-ai-generativelanguage/google/ai/generativelanguage_v1beta3/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.4.0" # {x-release-please-version} diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json index d6c3fe4c5051..2ce8be026282 100644 --- a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-ai-generativelanguage", - "version": "0.1.0" + "version": "0.4.0" }, "snippets": [ { diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json index 1755de17b8e8..9f5b6633ff87 100644 --- a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-ai-generativelanguage", - "version": "0.1.0" + "version": "0.4.0" }, "snippets": [ { diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json index 5b7d0a0509b4..f54b8776c427 100644 --- a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta2.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-ai-generativelanguage", - "version": "0.1.0" + "version": "0.4.0" }, "snippets": [ { diff --git a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json index 91de9e353f90..e23a99227b6a 100644 --- a/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json +++ b/packages/google-ai-generativelanguage/samples/generated_samples/snippet_metadata_google.ai.generativelanguage.v1beta3.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-ai-generativelanguage", - "version": "0.1.0" + "version": "0.4.0" }, "snippets": [ { diff --git a/packages/google-analytics-data/CHANGELOG.md b/packages/google-analytics-data/CHANGELOG.md index ece5527160bc..a30ab5ba3ac0 100644 --- a/packages/google-analytics-data/CHANGELOG.md +++ b/packages/google-analytics-data/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## [0.18.2](https://github.com/googleapis/google-cloud-python/compare/google-analytics-data-v0.18.1...google-analytics-data-v0.18.2) (2023-12-09) + + +### Features + +* [google-analytics-data] add `CreateAudienceExport`, `QueryAudienceExport`, `GetAudienceExport`, `ListAudienceExports` methods to the Data API v1 beta 
([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) +* add `sampling_metadatas` field to `ResponseMetaData` ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) +* add `SamplingMetadata`, `AudienceExport`, `AudienceExportMetadata`, `AudienceDimensionValue` types ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) + + +### Bug Fixes + +* add `optional` label to `consumed`, `remaining` fields of the `QuotaStatus` type ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) + + +### Documentation + +* updated comments ([182c4cf](https://github.com/googleapis/google-cloud-python/commit/182c4cf16e7e1eef2819396a5a0b590a81af6a58)) + ## [0.18.1](https://github.com/googleapis/google-cloud-python/compare/google-analytics-data-v0.18.0...google-analytics-data-v0.18.1) (2023-12-07) diff --git a/packages/google-analytics-data/google/analytics/data/gapic_version.py b/packages/google-analytics-data/google/analytics/data/gapic_version.py index 360a0d13ebdd..c50bd06de962 100644 --- a/packages/google-analytics-data/google/analytics/data/gapic_version.py +++ b/packages/google-analytics-data/google/analytics/data/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.18.2" # {x-release-please-version} diff --git a/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py b/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py index 360a0d13ebdd..c50bd06de962 100644 --- a/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py +++ b/packages/google-analytics-data/google/analytics/data_v1alpha/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.18.2" # {x-release-please-version} diff --git a/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py b/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py index 360a0d13ebdd..c50bd06de962 100644 --- a/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py +++ b/packages/google-analytics-data/google/analytics/data_v1beta/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "0.0.0" # {x-release-please-version} +__version__ = "0.18.2" # {x-release-please-version} diff --git a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json index f35436636628..81758782c5bd 100644 --- a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json +++ b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1alpha.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-analytics-data", - "version": "0.1.0" + "version": "0.18.2" }, "snippets": [ { diff --git a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json index 29e86a085403..86f0a1056f52 100644 --- a/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json +++ b/packages/google-analytics-data/samples/generated_samples/snippet_metadata_google.analytics.data.v1beta.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-analytics-data", - "version": "0.1.0" + "version": "0.18.2" }, "snippets": [ { From e6b5ea0a5b52ae56c944457c533b0ad225020357 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 11 Dec 2023 16:13:35 +0100 Subject: [PATCH 10/80] chore(deps): update actions/setup-python action to v5 (#12109) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [actions/setup-python](https://togithub.com/actions/setup-python) | action | major | `v4` -> `v5` | --- ### Release Notes
actions/setup-python (actions/setup-python) ### [`v5`](https://togithub.com/actions/setup-python/compare/v4...v5) [Compare Source](https://togithub.com/actions/setup-python/compare/v4...v5)
--- ### Configuration 📅 **Schedule**: Branch creation - "before 4am on Monday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/googleapis/google-cloud-python). --- .github/workflows/configure_release_please.yml | 2 +- .github/workflows/docs.yml | 4 ++-- .github/workflows/lint.yml | 2 +- .github/workflows/main.yml | 2 +- .github/workflows/scripts.yml | 2 +- .github/workflows/unittest.yml | 6 +++--- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/configure_release_please.yml b/.github/workflows/configure_release_please.yml index 07929edb0a8b..d3a3f2b53420 100644 --- a/.github/workflows/configure_release_please.yml +++ b/.github/workflows/configure_release_please.yml @@ -42,7 +42,7 @@ jobs: with: fetch-depth: 2 - name: Set up Python 3.11 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.11 - name: Run configure_release_please.py diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 32e91b43a909..498aeef0c4bb 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -19,7 +19,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install nox @@ -44,7 +44,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install nox diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 07df25ffe0ea..3de33f190b14 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,7 +19,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install nox diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index aa318acba43b..7d306909a816 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -43,7 +43,7 @@ jobs: with: fetch-depth: 2 - name: Set up Python 3.10 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.10 - name: Install script dependencies diff --git a/.github/workflows/scripts.yml b/.github/workflows/scripts.yml index 5268282f8fe0..7621bf969ea1 100644 --- a/.github/workflows/scripts.yml +++ b/.github/workflows/scripts.yml @@ -19,7 +19,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install pytest diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 36e878edb62c..fcd80758d8ef 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -22,7 +22,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install nox @@ -56,7 +56,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: 
python-version: ${{ matrix.python }} - name: Install nox @@ -84,7 +84,7 @@ jobs: with: fetch-depth: 2 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Set number of files changes in packages directory From 8832a03cb0de53f3e30ca53899091a0a3433a409 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 13:06:09 -0500 Subject: [PATCH 11/80] feat: [google-cloud-recommender] Support cost_in_local_currency field in the cost projection (#12112) BEGIN_COMMIT_OVERRIDE feat: Support cost_in_local_currency field in the cost projection docs: Fix typo for the comment of reliability_projection docs: Add comment for targetResources END_COMMIT_OVERRIDE - [ ] Regenerate this pull request now. docs: Fix typo for the comment of reliability_projection docs: Add comment for targetResources PiperOrigin-RevId: 589982370 Source-Link: https://github.com/googleapis/googleapis/commit/b7abf5c7949ac890bb5c5f9810a8e7ee4fec85f5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/1617d1ede2a05c13289cfea4883bcdf9116f564e Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLXJlY29tbWVuZGVyLy5Pd2xCb3QueWFtbCIsImgiOiIxNjE3ZDFlZGUyYTA1YzEzMjg5Y2ZlYTQ4ODNiY2RmOTExNmY1NjRlIn0= --------- Co-authored-by: Owl Bot --- .../google/cloud/recommender/gapic_version.py | 2 +- .../google/cloud/recommender_v1/gapic_version.py | 2 +- .../services/recommender/async_client.py | 6 ++++++ .../recommender_v1/services/recommender/client.py | 6 ++++++ .../cloud/recommender_v1/types/recommendation.py | 10 +++++++++- .../recommender_v1/types/recommender_service.py | 14 +++++++++++++- .../cloud/recommender_v1beta1/gapic_version.py | 2 +- ...ippet_metadata_google.cloud.recommender.v1.json | 2 +- ..._metadata_google.cloud.recommender.v1beta1.json | 2 +- 9 files changed, 39 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-recommender/google/cloud/recommender/gapic_version.py b/packages/google-cloud-recommender/google/cloud/recommender/gapic_version.py index 13e6df46e52c..360a0d13ebdd 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender/gapic_version.py +++ b/packages/google-cloud-recommender/google/cloud/recommender/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.13.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-recommender/google/cloud/recommender_v1/gapic_version.py b/packages/google-cloud-recommender/google/cloud/recommender_v1/gapic_version.py index 13e6df46e52c..360a0d13ebdd 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender_v1/gapic_version.py +++ b/packages/google-cloud-recommender/google/cloud/recommender_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.13.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/async_client.py b/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/async_client.py index 78ab904a6ce0..ecfe1fffab73 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/async_client.py +++ b/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/async_client.py @@ -708,6 +708,8 @@ async def sample_list_recommendations(): - ``priority`` + - ``targetResources`` + Examples: - ``stateInfo.state = ACTIVE OR stateInfo.state = DISMISSED`` @@ -716,8 +718,12 @@ async def sample_list_recommendations(): - ``priority = P1 OR priority = P2`` + - ``targetResources : //compute.googleapis.com/projects/1234/zones/us-central1-a/instances/instance-1`` + - ``stateInfo.state = ACTIVE AND (priority = P1 OR priority = P2)`` + The max allowed filter length is 500 characters. + (These expressions are based on the filter language described at https://google.aip.dev/160) diff --git a/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/client.py b/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/client.py index a6311259c03a..c446fff128fd 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/client.py +++ b/packages/google-cloud-recommender/google/cloud/recommender_v1/services/recommender/client.py @@ -1018,6 +1018,8 @@ def sample_list_recommendations(): - ``priority`` + - ``targetResources`` + Examples: - ``stateInfo.state = ACTIVE OR stateInfo.state = DISMISSED`` @@ -1026,8 +1028,12 @@ def sample_list_recommendations(): - ``priority = P1 OR priority = P2`` + - ``targetResources : //compute.googleapis.com/projects/1234/zones/us-central1-a/instances/instance-1`` + - ``stateInfo.state = ACTIVE AND (priority = P1 OR priority = P2)`` + The max allowed filter length is 500 characters. + (These expressions are based on the filter language described at https://google.aip.dev/160) diff --git a/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommendation.py b/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommendation.py index c35448f7b67e..0349d5bf8037 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommendation.py +++ b/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommendation.py @@ -439,6 +439,9 @@ class CostProjection(proto.Message): prices. duration (google.protobuf.duration_pb2.Duration): Duration for which this cost applies. + cost_in_local_currency (google.type.money_pb2.Money): + The approximate cost savings in the billing + account's local currency. """ cost: money_pb2.Money = proto.Field( @@ -451,6 +454,11 @@ class CostProjection(proto.Message): number=2, message=duration_pb2.Duration, ) + cost_in_local_currency: money_pb2.Money = proto.Field( + proto.MESSAGE, + number=3, + message=money_pb2.Money, + ) class SecurityProjection(proto.Message): @@ -564,7 +572,7 @@ class Impact(proto.Message): This field is a member of `oneof`_ ``projection``. reliability_projection (google.cloud.recommender_v1.types.ReliabilityProjection): - Use with CategoryType.RELAIBILITY + Use with CategoryType.RELIABILITY This field is a member of `oneof`_ ``projection``. 
""" diff --git a/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommender_service.py b/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommender_service.py index 1b0c820877bc..160aa62483fe 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommender_service.py +++ b/packages/google-cloud-recommender/google/cloud/recommender_v1/types/recommender_service.py @@ -94,6 +94,8 @@ class ListInsightsRequest(proto.Message): - ``severity`` + - ``targetResources`` + Examples: - ``stateInfo.state = ACTIVE OR stateInfo.state = DISMISSED`` @@ -102,8 +104,12 @@ class ListInsightsRequest(proto.Message): - ``severity = CRITICAL OR severity = HIGH`` + - ``targetResources : //compute.googleapis.com/projects/1234/zones/us-central1-a/instances/instance-1`` + - ``stateInfo.state = ACTIVE AND (severity = CRITICAL OR severity = HIGH)`` + The max allowed filter length is 500 characters. + (These expressions are based on the filter language described at https://google.aip.dev/160) """ @@ -239,6 +245,8 @@ class ListRecommendationsRequest(proto.Message): - ``priority`` + - ``targetResources`` + Examples: - ``stateInfo.state = ACTIVE OR stateInfo.state = DISMISSED`` @@ -247,8 +255,12 @@ class ListRecommendationsRequest(proto.Message): - ``priority = P1 OR priority = P2`` + - ``targetResources : //compute.googleapis.com/projects/1234/zones/us-central1-a/instances/instance-1`` + - ``stateInfo.state = ACTIVE AND (priority = P1 OR priority = P2)`` + The max allowed filter length is 500 characters. + (These expressions are based on the filter language described at https://google.aip.dev/160) """ @@ -319,7 +331,7 @@ class MarkRecommendationDismissedRequest(proto.Message): Attributes: name (str): - Name of the recommendation. + Required. Name of the recommendation. etag (str): Fingerprint of the Recommendation. Provides optimistic locking. diff --git a/packages/google-cloud-recommender/google/cloud/recommender_v1beta1/gapic_version.py b/packages/google-cloud-recommender/google/cloud/recommender_v1beta1/gapic_version.py index 13e6df46e52c..360a0d13ebdd 100644 --- a/packages/google-cloud-recommender/google/cloud/recommender_v1beta1/gapic_version.py +++ b/packages/google-cloud-recommender/google/cloud/recommender_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.13.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1.json b/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1.json index e027227c237f..5c470a773f2a 100644 --- a/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1.json +++ b/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-recommender", - "version": "2.13.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1beta1.json b/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1beta1.json index fac0335f8577..e4ff805e99aa 100644 --- a/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1beta1.json +++ b/packages/google-cloud-recommender/samples/generated_samples/snippet_metadata_google.cloud.recommender.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-recommender", - "version": "2.13.0" + "version": "0.1.0" }, "snippets": [ { From 0fc00b8514fa29dd183381e5dac8f712a37c2f34 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 13:43:48 -0500 Subject: [PATCH 12/80] feat: [google-cloud-asset] added messages ExportAssetsResponse, BatchGetAssetsHistoryResponse (#12111) BEGIN_COMMIT_OVERRIDE feat: added messages ExportAssetsResponse, BatchGetAssetsHistoryResponse feat: added Asset.access_policy, access_level, service_perimeter, org_policy feat: added resource definitions to some messages docs: updated comments chore: removed backend configuration from service config END_COMMIT_OVERRIDE - [ ] Regenerate this pull request now. 
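For reference, a minimal sketch of how the ``GetOperation`` mixin and the ``ExportAssetsResponse`` message added in this change could be used together from the v1p2beta1 client. The operation name below is a hypothetical placeholder, and unpacking the packed response relies on the proto-plus ``deserialize`` helper; this is an illustration under those assumptions, not part of the generated surface shown in the diff.

```python
from google.cloud import asset_v1p2beta1

client = asset_v1p2beta1.AssetServiceClient()

# Look up the latest state of a long-running export operation through the
# newly exposed google.longrunning Operations GetOperation mixin.
# The operation name is a placeholder, not a real resource.
operation = client.get_operation(
    request={"name": "projects/123/operations/ExportAssets/sample-operation-id"}
)

if operation.done and operation.HasField("response"):
    # Per the v1p2beta1 docs, a finished export reports its result as an
    # ExportAssetsResponse packed into Operation.response.
    result = asset_v1p2beta1.ExportAssetsResponse.deserialize(
        operation.response.value
    )
    print("Snapshot taken at:", result.read_time)
    print("Output written to:", result.output_config)
```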
feat: added Asset.access_policy, access_level, service_perimeter, org_policy feat: added resource definitions to some messages docs: updated comments chore: removed backend configuration from service config PiperOrigin-RevId: 589961470 Source-Link: https://github.com/googleapis/googleapis/commit/02fbe750cda7ed62049f0d2b44798c005546a2af Source-Link: https://github.com/googleapis/googleapis-gen/commit/2a25ee3638abdc2ea520c87e1d4c7c53fc624e30 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLWFzc2V0Ly5Pd2xCb3QueWFtbCIsImgiOiIyYTI1ZWUzNjM4YWJkYzJlYTUyMGM4N2UxZDRjN2M1M2ZjNjI0ZTMwIn0= --------- Co-authored-by: Owl Bot Co-authored-by: ohmayr --- .../google/cloud/asset/gapic_version.py | 2 +- .../google/cloud/asset_v1/gapic_version.py | 2 +- .../cloud/asset_v1p1beta1/gapic_version.py | 2 +- .../google/cloud/asset_v1p2beta1/__init__.py | 4 + .../cloud/asset_v1p2beta1/gapic_version.py | 2 +- .../services/asset_service/async_client.py | 60 ++++- .../services/asset_service/client.py | 60 ++++- .../services/asset_service/transports/base.py | 10 + .../services/asset_service/transports/grpc.py | 18 ++ .../asset_service/transports/grpc_asyncio.py | 18 ++ .../services/asset_service/transports/rest.py | 90 ++++++++ .../cloud/asset_v1p2beta1/types/__init__.py | 4 + .../asset_v1p2beta1/types/asset_service.py | 63 +++++- .../cloud/asset_v1p2beta1/types/assets.py | 179 ++++++++++----- .../cloud/asset_v1p5beta1/gapic_version.py | 2 +- ...nippet_metadata_google.cloud.asset.v1.json | 2 +- ...metadata_google.cloud.asset.v1p1beta1.json | 2 +- ...metadata_google.cloud.asset.v1p2beta1.json | 2 +- ...metadata_google.cloud.asset.v1p5beta1.json | 2 +- .../asset_v1p2beta1/test_asset_service.py | 205 ++++++++++++++++++ 20 files changed, 657 insertions(+), 72 deletions(-) diff --git a/packages/google-cloud-asset/google/cloud/asset/gapic_version.py b/packages/google-cloud-asset/google/cloud/asset/gapic_version.py index 3d872171749b..360a0d13ebdd 100644 --- a/packages/google-cloud-asset/google/cloud/asset/gapic_version.py +++ b/packages/google-cloud-asset/google/cloud/asset/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.21.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-asset/google/cloud/asset_v1/gapic_version.py b/packages/google-cloud-asset/google/cloud/asset_v1/gapic_version.py index 3d872171749b..360a0d13ebdd 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1/gapic_version.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.21.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p1beta1/gapic_version.py b/packages/google-cloud-asset/google/cloud/asset_v1p1beta1/gapic_version.py index 3d872171749b..360a0d13ebdd 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p1beta1/gapic_version.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "3.21.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/__init__.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/__init__.py index d910565287c2..068ee389c30d 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/__init__.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/__init__.py @@ -20,9 +20,11 @@ from .services.asset_service import AssetServiceAsyncClient, AssetServiceClient from .types.asset_service import ( + BatchGetAssetsHistoryResponse, ContentType, CreateFeedRequest, DeleteFeedRequest, + ExportAssetsResponse, Feed, FeedOutputConfig, GcsDestination, @@ -39,9 +41,11 @@ "AssetServiceAsyncClient", "Asset", "AssetServiceClient", + "BatchGetAssetsHistoryResponse", "ContentType", "CreateFeedRequest", "DeleteFeedRequest", + "ExportAssetsResponse", "Feed", "FeedOutputConfig", "GcsDestination", diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/gapic_version.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/gapic_version.py index 3d872171749b..360a0d13ebdd 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/gapic_version.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.21.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/async_client.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/async_client.py index 501e3fd570dc..f5c887281a9f 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/async_client.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/async_client.py @@ -42,6 +42,8 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore +from google.longrunning import operations_pb2 # type: ignore + from google.cloud.asset_v1p2beta1.types import asset_service from .client import AssetServiceClient @@ -263,8 +265,8 @@ async def sample_create_feed(): be an organization number (such as "organizations/123"), a folder number (such as "folders/123"), a project ID - (such as "projects/my-project-id")", or - a project number (such as + (such as "projects/my-project-id"), or a + project number (such as "projects/12345"). This corresponds to the ``parent`` field @@ -779,6 +781,60 @@ async def sample_delete_feed(): metadata=metadata, ) + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. 
+ """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self) -> "AssetServiceAsyncClient": return self diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/client.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/client.py index 635e361e52df..29f3e3947ce2 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/client.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/client.py @@ -46,6 +46,8 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore +from google.longrunning import operations_pb2 # type: ignore + from google.cloud.asset_v1p2beta1.types import asset_service from .transports.base import DEFAULT_CLIENT_INFO, AssetServiceTransport @@ -489,8 +491,8 @@ def sample_create_feed(): be an organization number (such as "organizations/123"), a folder number (such as "folders/123"), a project ID - (such as "projects/my-project-id")", or - a project number (such as + (such as "projects/my-project-id"), or a + project number (such as "projects/12345"). This corresponds to the ``parent`` field @@ -988,6 +990,60 @@ def __exit__(self, type, value, traceback): """ self.transport.close() + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/base.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/base.py index ed8800ead5d9..0d10e425debd 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/base.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/base.py @@ -22,6 +22,7 @@ from google.api_core import retry as retries import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account # type: ignore from google.protobuf import empty_pb2 # type: ignore @@ -236,6 +237,15 @@ def delete_feed( ]: raise NotImplementedError() + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc.py index c6d452b2d603..46e6ae054963 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc.py @@ -20,6 +20,7 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore import grpc # type: ignore @@ -363,6 +364,23 @@ def delete_feed( def close(self): self.grpc_channel.close() + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + @property def kind(self) -> str: return "grpc" diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py index 7402afe669b7..6fd052c8580d 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py @@ -19,6 +19,7 @@ from google.api_core import gapic_v1, grpc_helpers_async from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore import grpc # type: ignore from grpc.experimental import aio # type: ignore @@ -370,5 +371,22 @@ def delete_feed( def close(self): return self.grpc_channel.close() + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + __all__ = ("AssetServiceGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/rest.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/rest.py index da55c15c7f11..d795859f2744 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/rest.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/services/asset_service/transports/rest.py @@ -36,6 +36,7 @@ OptionalRetry = Union[retries.Retry, object] # type: ignore +from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.cloud.asset_v1p2beta1.types import asset_service @@ -203,6 +204,29 @@ def post_update_feed(self, response: asset_service.Feed) -> asset_service.Feed: """ return response + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the AssetService server. 
+ """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the AssetService server but before + it is returned to user code. + """ + return response + @dataclasses.dataclass class AssetServiceRestStub: @@ -798,6 +822,72 @@ def update_feed( # In C++ this would require a dynamic_cast return self._UpdateFeed(self._session, self._host, self._interceptor) # type: ignore + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(AssetServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1p2beta1/{name=*/*/operations/*/**}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + @property def kind(self) -> str: return "rest" diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/__init__.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/__init__.py index d243980fc029..7ac7d791ef65 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/__init__.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/__init__.py @@ -14,9 +14,11 @@ # limitations under the License. 
# from .asset_service import ( + BatchGetAssetsHistoryResponse, ContentType, CreateFeedRequest, DeleteFeedRequest, + ExportAssetsResponse, Feed, FeedOutputConfig, GcsDestination, @@ -30,8 +32,10 @@ from .assets import Asset, Resource, TemporalAsset, TimeWindow __all__ = ( + "BatchGetAssetsHistoryResponse", "CreateFeedRequest", "DeleteFeedRequest", + "ExportAssetsResponse", "Feed", "FeedOutputConfig", "GcsDestination", diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/asset_service.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/asset_service.py index c164986483c8..abcbbaa4c966 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/asset_service.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/asset_service.py @@ -18,12 +18,17 @@ from typing import MutableMapping, MutableSequence from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore import proto # type: ignore +from google.cloud.asset_v1p2beta1.types import assets as gca_assets + __protobuf__ = proto.module( package="google.cloud.asset.v1p2beta1", manifest={ "ContentType", + "ExportAssetsResponse", + "BatchGetAssetsHistoryResponse", "CreateFeedRequest", "GetFeedRequest", "ListFeedsRequest", @@ -55,6 +60,48 @@ class ContentType(proto.Enum): IAM_POLICY = 2 +class ExportAssetsResponse(proto.Message): + r"""The export asset response. This message is returned by the + [google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation] + method in the returned + [google.longrunning.Operation.response][google.longrunning.Operation.response] + field. + + Attributes: + read_time (google.protobuf.timestamp_pb2.Timestamp): + Time the snapshot was taken. + output_config (google.cloud.asset_v1p2beta1.types.OutputConfig): + Output configuration indicating where the + results were output to. + """ + + read_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + output_config: "OutputConfig" = proto.Field( + proto.MESSAGE, + number=2, + message="OutputConfig", + ) + + +class BatchGetAssetsHistoryResponse(proto.Message): + r"""Batch get assets history response. + + Attributes: + assets (MutableSequence[google.cloud.asset_v1p2beta1.types.TemporalAsset]): + A list of assets with valid time windows. + """ + + assets: MutableSequence[gca_assets.TemporalAsset] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_assets.TemporalAsset, + ) + + class CreateFeedRequest(proto.Message): r"""Create asset feed request. @@ -66,7 +113,7 @@ class CreateFeedRequest(proto.Message): organization number (such as "organizations/123"), a folder number (such as "folders/123"), a project ID (such as - "projects/my-project-id")", or a project number + "projects/my-project-id"), or a project number (such as "projects/12345"). feed_id (str): Required. This is the client-assigned asset @@ -218,7 +265,7 @@ class GcsDestination(proto.Message): Attributes: uri (str): - The uri of the Cloud Storage object. It's the same uri that + The URI of the Cloud Storage object. It's the same URI that is used by gsutil. For example: "gs://bucket_name/object_name". See `Viewing and Editing Object @@ -236,12 +283,12 @@ class GcsDestination(proto.Message): class PubsubDestination(proto.Message): - r"""A Cloud Pubsub destination. + r"""A Pub/Sub destination. Attributes: topic (str): - The name of the Cloud Pub/Sub topic to publish to. 
For - example: ``projects/PROJECT_ID/topics/TOPIC_ID``. + The name of the Pub/Sub topic to publish to. For example: + ``projects/PROJECT_ID/topics/TOPIC_ID``. """ topic: str = proto.Field( @@ -257,7 +304,7 @@ class FeedOutputConfig(proto.Message): Attributes: pubsub_destination (google.cloud.asset_v1p2beta1.types.PubsubDestination): - Destination on Cloud Pubsub. + Destination on Pub/Sub. This field is a member of `oneof`_ ``destination``. """ @@ -293,7 +340,7 @@ class Feed(proto.Message): A list of the full names of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified - asset_names and asset_types are exported to the feed. For + asset_names or asset_types are exported to the feed. For example: ``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``. See `Resource @@ -302,7 +349,7 @@ class Feed(proto.Message): asset_types (MutableSequence[str]): A list of types of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only - asset updates matching specified asset_names and asset_types + asset updates matching specified asset_names or asset_types are exported to the feed. For example: "compute.googleapis.com/Disk" See `Introduction to Cloud Asset diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/assets.py b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/assets.py index c8d2c7759c82..63be218e9354 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/assets.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p2beta1/types/assets.py @@ -17,7 +17,13 @@ from typing import MutableMapping, MutableSequence +from google.cloud.orgpolicy.v1 import orgpolicy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore +from google.identity.accesscontextmanager.v1 import access_level_pb2 # type: ignore +from google.identity.accesscontextmanager.v1 import access_policy_pb2 # type: ignore +from google.identity.accesscontextmanager.v1 import ( + service_perimeter_pb2, +) # type: ignore from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import proto # type: ignore @@ -34,18 +40,18 @@ class TemporalAsset(proto.Message): - r"""Temporal asset. In addition to the asset, the temporal asset - includes the status of the asset and valid from and to time of - it. + r"""An asset in Google Cloud and its temporal metadata, including + the time window when it was observed and its status during that + window. Attributes: window (google.cloud.asset_v1p2beta1.types.TimeWindow): The time window when the asset data and state was observed. deleted (bool): - If the asset is deleted or not. + Whether the asset has been deleted or not. asset (google.cloud.asset_v1p2beta1.types.Asset): - Asset. + An asset in Google Cloud. """ window: "TimeWindow" = proto.Field( @@ -65,14 +71,15 @@ class TemporalAsset(proto.Message): class TimeWindow(proto.Message): - r"""A time window of (start_time, end_time]. + r"""A time window specified by its ``start_time`` and ``end_time``. Attributes: start_time (google.protobuf.timestamp_pb2.Timestamp): Start time of the time window (exclusive). end_time (google.protobuf.timestamp_pb2.Timestamp): - End time of the time window (inclusive). - Current timestamp if not specified. + End time of the time window (inclusive). If + not specified, the current timestamp is used + instead. 
""" start_time: timestamp_pb2.Timestamp = proto.Field( @@ -88,35 +95,81 @@ class TimeWindow(proto.Message): class Asset(proto.Message): - r"""Cloud asset. This includes all Google Cloud Platform - resources, Cloud IAM policies, and other non-GCP assets. + r"""An asset in Google Cloud. An asset can be any resource in the Google + Cloud `resource + hierarchy `__, + a resource outside the Google Cloud resource hierarchy (such as + Google Kubernetes Engine clusters and objects), or a policy (e.g. + IAM policy). See `Supported asset + types `__ + for more information. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: name (str): - The full name of the asset. For example: - ``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``. + The full name of the asset. Example: + ``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`` + See `Resource - Names `__ + names `__ for more information. asset_type (str): - Type of the asset. Example: - "compute.googleapis.com/Disk". + The type of the asset. Example: + ``compute.googleapis.com/Disk`` + + See `Supported asset + types `__ + for more information. resource (google.cloud.asset_v1p2beta1.types.Resource): - Representation of the resource. + A representation of the resource. iam_policy (google.iam.v1.policy_pb2.Policy): - Representation of the actual Cloud IAM policy - set on a cloud resource. For each resource, - there must be at most one Cloud IAM policy set - on it. + A representation of the IAM policy set on a Google Cloud + resource. There can be a maximum of one IAM policy set on + any given resource. In addition, IAM policies inherit their + granted access scope from any policies set on parent + resources in the resource hierarchy. Therefore, the + effectively policy is the union of both the policy set on + this resource and each policy set on all of the resource's + ancestry resource levels in the hierarchy. See `this + topic `__ + for more information. ancestors (MutableSequence[str]): - Asset's ancestry path in Cloud Resource Manager (CRM) - hierarchy, represented as a list of relative resource names. - Ancestry path starts with the closest CRM ancestor and ends - at root. If the asset is a CRM project/folder/organization, - this starts from the asset itself. - - Example: ["projects/123456789", "folders/5432", - "organizations/1234"] + The ancestry path of an asset in Google Cloud `resource + hierarchy `__, + represented as a list of relative resource names. An + ancestry path starts with the closest ancestor in the + hierarchy and ends at root. If the asset is a project, + folder, or organization, the ancestry path starts from the + asset itself. + + Example: + ``["projects/123456789", "folders/5432", "organizations/1234"]`` + access_policy (google.identity.accesscontextmanager.v1.access_policy_pb2.AccessPolicy): + Please also refer to the `access policy user + guide `__. + + This field is a member of `oneof`_ ``access_context_policy``. + access_level (google.identity.accesscontextmanager.v1.access_level_pb2.AccessLevel): + Please also refer to the `access level user + guide `__. + + This field is a member of `oneof`_ ``access_context_policy``. 
+ service_perimeter (google.identity.accesscontextmanager.v1.service_perimeter_pb2.ServicePerimeter): + Please also refer to the `service perimeter user + guide `__. + + This field is a member of `oneof`_ ``access_context_policy``. + org_policy (MutableSequence[google.cloud.orgpolicy.v1.orgpolicy_pb2.Policy]): + A representation of an `organization + policy `__. + There can be more than one organization policy with + different constraints set on a given resource. """ name: str = proto.Field( @@ -141,49 +194,73 @@ class Asset(proto.Message): proto.STRING, number=6, ) + access_policy: access_policy_pb2.AccessPolicy = proto.Field( + proto.MESSAGE, + number=7, + oneof="access_context_policy", + message=access_policy_pb2.AccessPolicy, + ) + access_level: access_level_pb2.AccessLevel = proto.Field( + proto.MESSAGE, + number=8, + oneof="access_context_policy", + message=access_level_pb2.AccessLevel, + ) + service_perimeter: service_perimeter_pb2.ServicePerimeter = proto.Field( + proto.MESSAGE, + number=9, + oneof="access_context_policy", + message=service_perimeter_pb2.ServicePerimeter, + ) + org_policy: MutableSequence[orgpolicy_pb2.Policy] = proto.RepeatedField( + proto.MESSAGE, + number=10, + message=orgpolicy_pb2.Policy, + ) class Resource(proto.Message): - r"""Representation of a cloud resource. + r"""A representation of a Google Cloud resource. Attributes: version (str): - The API version. Example: "v1". + The API version. Example: ``v1`` discovery_document_uri (str): The URL of the discovery document containing the resource's - JSON schema. For example: - ``"https://www.googleapis.com/discovery/v1/apis/compute/v1/rest"``. - It will be left unspecified for resources without a - discovery-based API, such as Cloud Bigtable. + JSON schema. Example: + ``https://www.googleapis.com/discovery/v1/apis/compute/v1/rest`` + + This value is unspecified for resources that do not have an + API based on a discovery document, such as Cloud Bigtable. discovery_name (str): - The JSON schema name listed in the discovery - document. Example: "Project". It will be left - unspecified for resources (such as Cloud - Bigtable) without a discovery-based API. + The JSON schema name listed in the discovery document. + Example: ``Project`` + + This value is unspecified for resources that do not have an + API based on a discovery document, such as Cloud Bigtable. resource_url (str): - The REST URL for accessing the resource. An HTTP GET - operation using this URL returns the resource itself. - Example: - ``https://cloudresourcemanager.googleapis.com/v1/projects/my-project-123``. - It will be left unspecified for resources without a REST - API. + The REST URL for accessing the resource. An HTTP ``GET`` + request using this URL returns the resource itself. Example: + ``https://cloudresourcemanager.googleapis.com/v1/projects/my-project-123`` + + This value is unspecified for resources without a REST API. parent (str): The full name of the immediate parent of this resource. See `Resource Names `__ for more information. - For GCP assets, it is the parent resource defined in the - `Cloud IAM policy + For Google Cloud assets, this value is the parent resource + defined in the `IAM policy hierarchy `__. - For example: - ``"//cloudresourcemanager.googleapis.com/projects/my_project_123"``. + Example: + ``//cloudresourcemanager.googleapis.com/projects/my_project_123`` - For third-party assets, it is up to the users to define. + For third-party assets, this field may be set differently. 
data (google.protobuf.struct_pb2.Struct): The content of the resource, in which some - sensitive fields are scrubbed away and may not - be present. + sensitive fields are removed and may not be + present. """ version: str = proto.Field( diff --git a/packages/google-cloud-asset/google/cloud/asset_v1p5beta1/gapic_version.py b/packages/google-cloud-asset/google/cloud/asset_v1p5beta1/gapic_version.py index 3d872171749b..360a0d13ebdd 100644 --- a/packages/google-cloud-asset/google/cloud/asset_v1p5beta1/gapic_version.py +++ b/packages/google-cloud-asset/google/cloud/asset_v1p5beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.21.0" # {x-release-please-version} +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1.json b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1.json index 49e86ca067b8..5a90dfa88b31 100644 --- a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1.json +++ b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-asset", - "version": "3.21.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p1beta1.json b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p1beta1.json index 9b56624814df..dcbeb822733b 100644 --- a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p1beta1.json +++ b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-asset", - "version": "3.21.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p2beta1.json b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p2beta1.json index d8c9e9f32cad..38eaede856da 100644 --- a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p2beta1.json +++ b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p2beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-asset", - "version": "3.21.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p5beta1.json b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p5beta1.json index 681b6b289f54..13ffc229e3a7 100644 --- a/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p5beta1.json +++ b/packages/google-cloud-asset/samples/generated_samples/snippet_metadata_google.cloud.asset.v1p5beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-asset", - "version": "3.21.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/packages/google-cloud-asset/tests/unit/gapic/asset_v1p2beta1/test_asset_service.py b/packages/google-cloud-asset/tests/unit/gapic/asset_v1p2beta1/test_asset_service.py index d36a3f340f11..2c827659c511 100644 --- 
a/packages/google-cloud-asset/tests/unit/gapic/asset_v1p2beta1/test_asset_service.py +++ b/packages/google-cloud-asset/tests/unit/gapic/asset_v1p2beta1/test_asset_service.py @@ -32,6 +32,7 @@ import google.auth from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import json_format @@ -3339,6 +3340,7 @@ def test_asset_service_base_transport(): "list_feeds", "update_feed", "delete_feed", + "get_operation", ) for method in methods: with pytest.raises(NotImplementedError): @@ -3892,6 +3894,209 @@ async def test_transport_close_async(): close.assert_called_once() +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = AssetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "sample1/sample2/operations/sample3/sample4"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = AssetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "sample1/sample2/operations/sample3/sample4"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation(transport: str = "grpc"): + client = AssetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = AssetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = AssetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = AssetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = AssetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = AssetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + def test_transport_close(): transports = { "rest": "_session", From 94e63cbbfe85b11e5cf38cbaa3511be8833a86f1 Mon Sep 17 00:00:00 2001 From: "owlbot-bootstrapper[bot]" <104649659+owlbot-bootstrapper[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 14:20:37 -0500 Subject: [PATCH 13/80] feat: add initial files for google.shopping.css.v1 (#12114) Source-Link: https://github.com/googleapis/googleapis-gen/commit/1617d1ede2a05c13289cfea4883bcdf9116f564e Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLXNob3BwaW5nLWNzcy8uT3dsQm90LnlhbWwiLCJoIjoiMTYxN2QxZWRlMmEwNWMxMzI4OWNmZWE0ODgzYmNkZjkxMTZmNTY0ZSJ9 --------- Co-authored-by: Owlbot Bootstrapper Co-authored-by: Owl Bot Co-authored-by: ohmayr --- packages/google-shopping-css/.OwlBot.yaml | 18 + packages/google-shopping-css/.coveragerc | 13 + packages/google-shopping-css/.flake8 | 33 + packages/google-shopping-css/.gitignore | 63 + .../google-shopping-css/.repo-metadata.json | 17 + packages/google-shopping-css/CHANGELOG.md | 1 + .../google-shopping-css/CODE_OF_CONDUCT.md | 95 + packages/google-shopping-css/CONTRIBUTING.rst | 271 ++ packages/google-shopping-css/LICENSE | 202 + packages/google-shopping-css/MANIFEST.in | 25 + packages/google-shopping-css/README.rst | 108 + .../google-shopping-css/docs/CHANGELOG.md | 1 + packages/google-shopping-css/docs/README.rst | 1 + .../docs/_static/custom.css | 20 + .../docs/_templates/layout.html | 50 + packages/google-shopping-css/docs/conf.py | 384 ++ .../docs/css_v1/account_labels_service.rst | 10 + .../docs/css_v1/accounts_service.rst | 10 + .../css_v1/css_product_inputs_service.rst | 6 + .../docs/css_v1/css_products_service.rst | 10 + .../docs/css_v1/services_.rst | 9 + .../docs/css_v1/types_.rst | 6 + packages/google-shopping-css/docs/index.rst | 23 + .../docs/multiprocessing.rst | 7 + .../google/shopping/css/__init__.py | 113 + .../google/shopping/css/gapic_version.py | 16 + .../google/shopping/css/py.typed | 2 + .../google/shopping/css_v1/__init__.py | 102 + .../shopping/css_v1/gapic_metadata.json | 250 + .../google/shopping/css_v1/gapic_version.py | 16 + .../google/shopping/css_v1/py.typed | 2 + .../shopping/css_v1/services/__init__.py | 15 + .../account_labels_service/__init__.py | 22 + .../account_labels_service/async_client.py | 664 +++ .../services/account_labels_service/client.py | 883 ++++ .../services/account_labels_service/pagers.py | 155 + .../transports/__init__.py | 38 + .../account_labels_service/transports/base.py | 210 + .../account_labels_service/transports/grpc.py | 352 ++ .../transports/grpc_asyncio.py | 355 ++ .../account_labels_service/transports/rest.py | 691 +++ .../services/accounts_service/__init__.py | 22 + .../services/accounts_service/async_client.py | 564 +++ .../services/accounts_service/client.py | 775 ++++ .../services/accounts_service/pagers.py | 155 + 
.../accounts_service/transports/__init__.py | 36 + .../accounts_service/transports/base.py | 204 + .../accounts_service/transports/grpc.py | 320 ++ .../transports/grpc_asyncio.py | 322 ++ .../accounts_service/transports/rest.py | 558 +++ .../css_product_inputs_service/__init__.py | 22 + .../async_client.py | 437 ++ .../css_product_inputs_service/client.py | 663 +++ .../transports/__init__.py | 41 + .../transports/base.py | 173 + .../transports/grpc.py | 306 ++ .../transports/grpc_asyncio.py | 307 ++ .../transports/rest.py | 432 ++ .../services/css_products_service/__init__.py | 22 + .../css_products_service/async_client.py | 481 ++ .../services/css_products_service/client.py | 693 +++ .../services/css_products_service/pagers.py | 155 + .../transports/__init__.py | 38 + .../css_products_service/transports/base.py | 190 + .../css_products_service/transports/grpc.py | 304 ++ .../transports/grpc_asyncio.py | 306 ++ .../css_products_service/transports/rest.py | 437 ++ .../google/shopping/css_v1/types/__init__.py | 76 + .../google/shopping/css_v1/types/accounts.py | 297 ++ .../shopping/css_v1/types/accounts_labels.py | 225 + .../css_v1/types/css_product_common.py | 750 +++ .../css_v1/types/css_product_inputs.py | 201 + .../shopping/css_v1/types/css_products.py | 182 + packages/google-shopping-css/mypy.ini | 3 + packages/google-shopping-css/noxfile.py | 410 ++ ...bels_service_create_account_label_async.py | 52 + ...abels_service_create_account_label_sync.py | 52 + ...bels_service_delete_account_label_async.py | 50 + ...abels_service_delete_account_label_sync.py | 50 + ...abels_service_list_account_labels_async.py | 53 + ...labels_service_list_account_labels_sync.py | 53 + ...bels_service_update_account_label_async.py | 51 + ...abels_service_update_account_label_sync.py | 51 + ...ated_accounts_service_get_account_async.py | 52 + ...rated_accounts_service_get_account_sync.py | 52 + ...ounts_service_list_child_accounts_async.py | 53 + ...counts_service_list_child_accounts_sync.py | 53 + ...ed_accounts_service_update_labels_async.py | 52 + ...ted_accounts_service_update_labels_sync.py | 52 + ..._service_delete_css_product_input_async.py | 50 + ...s_service_delete_css_product_input_sync.py | 50 + ..._service_insert_css_product_input_async.py | 59 + ...s_service_insert_css_product_input_sync.py | 59 + ..._products_service_get_css_product_async.py | 52 + ...s_products_service_get_css_product_sync.py | 52 + ...roducts_service_list_css_products_async.py | 53 + ...products_service_list_css_products_sync.py | 53 + ...ippet_metadata_google.shopping.css.v1.json | 1774 ++++++++ .../scripts/decrypt-secrets.sh | 46 + .../scripts/fixup_css_v1_keywords.py | 186 + packages/google-shopping-css/setup.py | 91 + .../google-shopping-css/testing/.gitignore | 3 + .../testing/constraints-3.10.txt | 7 + .../testing/constraints-3.11.txt | 7 + .../testing/constraints-3.12.txt | 7 + .../testing/constraints-3.7.txt | 10 + .../testing/constraints-3.8.txt | 7 + .../testing/constraints-3.9.txt | 7 + .../google-shopping-css/tests/__init__.py | 15 + .../tests/unit/__init__.py | 15 + .../tests/unit/gapic/__init__.py | 15 + .../tests/unit/gapic/css_v1/__init__.py | 15 + .../css_v1/test_account_labels_service.py | 4053 +++++++++++++++++ .../gapic/css_v1/test_accounts_service.py | 3350 ++++++++++++++ .../css_v1/test_css_product_inputs_service.py | 2598 +++++++++++ .../gapic/css_v1/test_css_products_service.py | 2802 ++++++++++++ 116 files changed, 31538 insertions(+) create mode 100644 packages/google-shopping-css/.OwlBot.yaml create 
mode 100644 packages/google-shopping-css/.coveragerc create mode 100644 packages/google-shopping-css/.flake8 create mode 100644 packages/google-shopping-css/.gitignore create mode 100644 packages/google-shopping-css/.repo-metadata.json create mode 100644 packages/google-shopping-css/CHANGELOG.md create mode 100644 packages/google-shopping-css/CODE_OF_CONDUCT.md create mode 100644 packages/google-shopping-css/CONTRIBUTING.rst create mode 100644 packages/google-shopping-css/LICENSE create mode 100644 packages/google-shopping-css/MANIFEST.in create mode 100644 packages/google-shopping-css/README.rst create mode 120000 packages/google-shopping-css/docs/CHANGELOG.md create mode 120000 packages/google-shopping-css/docs/README.rst create mode 100644 packages/google-shopping-css/docs/_static/custom.css create mode 100644 packages/google-shopping-css/docs/_templates/layout.html create mode 100644 packages/google-shopping-css/docs/conf.py create mode 100644 packages/google-shopping-css/docs/css_v1/account_labels_service.rst create mode 100644 packages/google-shopping-css/docs/css_v1/accounts_service.rst create mode 100644 packages/google-shopping-css/docs/css_v1/css_product_inputs_service.rst create mode 100644 packages/google-shopping-css/docs/css_v1/css_products_service.rst create mode 100644 packages/google-shopping-css/docs/css_v1/services_.rst create mode 100644 packages/google-shopping-css/docs/css_v1/types_.rst create mode 100644 packages/google-shopping-css/docs/index.rst create mode 100644 packages/google-shopping-css/docs/multiprocessing.rst create mode 100644 packages/google-shopping-css/google/shopping/css/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css/gapic_version.py create mode 100644 packages/google-shopping-css/google/shopping/css/py.typed create mode 100644 packages/google-shopping-css/google/shopping/css_v1/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/gapic_metadata.json create mode 100644 packages/google-shopping-css/google/shopping/css_v1/gapic_version.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/py.typed create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/async_client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/pagers.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/transports/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/transports/base.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/transports/grpc.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/transports/grpc_asyncio.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/transports/rest.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/async_client.py create 
mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/pagers.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/transports/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/transports/base.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/transports/grpc.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/transports/grpc_asyncio.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/accounts_service/transports/rest.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/async_client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/transports/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/transports/base.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/transports/grpc.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/transports/grpc_asyncio.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_product_inputs_service/transports/rest.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/async_client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/client.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/pagers.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/transports/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/transports/base.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/transports/grpc.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/transports/grpc_asyncio.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/services/css_products_service/transports/rest.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/types/__init__.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/types/accounts.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/types/accounts_labels.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/types/css_product_common.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/types/css_product_inputs.py create mode 100644 packages/google-shopping-css/google/shopping/css_v1/types/css_products.py create mode 100644 packages/google-shopping-css/mypy.ini create mode 100644 packages/google-shopping-css/noxfile.py create 
mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_create_account_label_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_create_account_label_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_delete_account_label_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_delete_account_label_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_list_account_labels_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_list_account_labels_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_update_account_label_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_account_labels_service_update_account_label_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_accounts_service_get_account_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_accounts_service_get_account_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_accounts_service_list_child_accounts_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_accounts_service_list_child_accounts_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_accounts_service_update_labels_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_accounts_service_update_labels_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_product_inputs_service_delete_css_product_input_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_product_inputs_service_delete_css_product_input_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_product_inputs_service_insert_css_product_input_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_product_inputs_service_insert_css_product_input_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_products_service_get_css_product_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_products_service_get_css_product_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_products_service_list_css_products_async.py create mode 100644 packages/google-shopping-css/samples/generated_samples/css_v1_generated_css_products_service_list_css_products_sync.py create mode 100644 packages/google-shopping-css/samples/generated_samples/snippet_metadata_google.shopping.css.v1.json create mode 100755 packages/google-shopping-css/scripts/decrypt-secrets.sh create mode 100644 packages/google-shopping-css/scripts/fixup_css_v1_keywords.py create mode 100644 packages/google-shopping-css/setup.py create mode 100644 packages/google-shopping-css/testing/.gitignore create mode 100644 
packages/google-shopping-css/testing/constraints-3.10.txt create mode 100644 packages/google-shopping-css/testing/constraints-3.11.txt create mode 100644 packages/google-shopping-css/testing/constraints-3.12.txt create mode 100644 packages/google-shopping-css/testing/constraints-3.7.txt create mode 100644 packages/google-shopping-css/testing/constraints-3.8.txt create mode 100644 packages/google-shopping-css/testing/constraints-3.9.txt create mode 100644 packages/google-shopping-css/tests/__init__.py create mode 100644 packages/google-shopping-css/tests/unit/__init__.py create mode 100644 packages/google-shopping-css/tests/unit/gapic/__init__.py create mode 100644 packages/google-shopping-css/tests/unit/gapic/css_v1/__init__.py create mode 100644 packages/google-shopping-css/tests/unit/gapic/css_v1/test_account_labels_service.py create mode 100644 packages/google-shopping-css/tests/unit/gapic/css_v1/test_accounts_service.py create mode 100644 packages/google-shopping-css/tests/unit/gapic/css_v1/test_css_product_inputs_service.py create mode 100644 packages/google-shopping-css/tests/unit/gapic/css_v1/test_css_products_service.py diff --git a/packages/google-shopping-css/.OwlBot.yaml b/packages/google-shopping-css/.OwlBot.yaml new file mode 100644 index 000000000000..03e3b9520666 --- /dev/null +++ b/packages/google-shopping-css/.OwlBot.yaml @@ -0,0 +1,18 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +deep-copy-regex: + - source: /google/shopping/css/(v.*)/.*-py + dest: /owl-bot-staging/google-shopping-css/$1 +api-name: google-shopping-css diff --git a/packages/google-shopping-css/.coveragerc b/packages/google-shopping-css/.coveragerc new file mode 100644 index 000000000000..11afeda0e930 --- /dev/null +++ b/packages/google-shopping-css/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/shopping/css/__init__.py + google/shopping/css/gapic_version.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/packages/google-shopping-css/.flake8 b/packages/google-shopping-css/.flake8 new file mode 100644 index 000000000000..87f6e408c47d --- /dev/null +++ b/packages/google-shopping-css/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E231, E266, E501, W503 +exclude = + # Exclude generated code. 
+ **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. + **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/packages/google-shopping-css/.gitignore b/packages/google-shopping-css/.gitignore new file mode 100644 index 000000000000..b4243ced74e4 --- /dev/null +++ b/packages/google-shopping-css/.gitignore @@ -0,0 +1,63 @@ +*.py[cod] +*.sw[op] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +.eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 +__pycache__ + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.nox +.cache +.pytest_cache + + +# Mac +.DS_Store + +# JetBrains +.idea + +# VS Code +.vscode + +# emacs +*~ + +# Built documentation +docs/_build +bigquery/docs/generated +docs.metadata + +# Virtual environment +env/ + +# Test logs +coverage.xml +*sponge_log.xml + +# System test environment variables. +system_tests/local_test_setup + +# Make sure a generated file isn't accidentally committed. +pylintrc +pylintrc.test diff --git a/packages/google-shopping-css/.repo-metadata.json b/packages/google-shopping-css/.repo-metadata.json new file mode 100644 index 000000000000..0e8f0e70bec7 --- /dev/null +++ b/packages/google-shopping-css/.repo-metadata.json @@ -0,0 +1,17 @@ +{ + "name": "google-shopping-css", + "name_pretty": "CSS API", + "api_description": "Programmatically manage your Comparison Shopping Service (CSS) account data at scale.", + "product_documentation": "https://developers.google.com/comparison-shopping-services/api", + "client_documentation": "https://googleapis.dev/python/google-shopping-css/latest", + "issue_tracker": "https://issuetracker.google.com/issues/new?component=826068&template=1564577", + "release_level": "preview", + "language": "python", + "library_type": "GAPIC_AUTO", + "repo": "googleapis/google-cloud-python", + "distribution_name": "google-shopping-css", + "api_id": "css.googleapis.com", + "default_version": "v1", + "codeowner_team": "", + "api_shortname": "css" +} \ No newline at end of file diff --git a/packages/google-shopping-css/CHANGELOG.md b/packages/google-shopping-css/CHANGELOG.md new file mode 100644 index 000000000000..5ddad421e08f --- /dev/null +++ b/packages/google-shopping-css/CHANGELOG.md @@ -0,0 +1 @@ +# Changelog \ No newline at end of file diff --git a/packages/google-shopping-css/CODE_OF_CONDUCT.md b/packages/google-shopping-css/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..039f43681204 --- /dev/null +++ b/packages/google-shopping-css/CODE_OF_CONDUCT.md @@ -0,0 +1,95 @@ + +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + + +Reports should be directed to *googleapis-stewards@google.com*, the +Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. 
+We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html \ No newline at end of file diff --git a/packages/google-shopping-css/CONTRIBUTING.rst b/packages/google-shopping-css/CONTRIBUTING.rst new file mode 100644 index 000000000000..e6e9d1877868 --- /dev/null +++ b/packages/google-shopping-css/CONTRIBUTING.rst @@ -0,0 +1,271 @@ +.. Generated by synthtool. DO NOT EDIT! +############ +Contributing +############ + +#. **Please sign one of the contributor license agreements below.** +#. Fork the repo, develop and test your code changes, add docs. +#. Make sure that your commit messages clearly describe the changes. +#. Send a pull request. (Please Read: `Faster Pull Request Reviews`_) + +.. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews + +.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries. + +*************** +Adding Features +*************** + +In order to add a feature: + +- The feature must be documented in both the API and narrative + documentation. + +- The feature must work fully on the following CPython versions: + 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows. + +- The feature must not add unnecessary dependencies (where + "unnecessary" is of course subjective, but new dependencies should + be discussed). + +**************************** +Using a Development Checkout +**************************** + +You'll have to create a development environment using a Git checkout: + +- While logged into your GitHub account, navigate to the + ``google-cloud-python`` `repo`_ on GitHub. + +- Fork and clone the ``google-cloud-python`` repository to your GitHub account by + clicking the "Fork" button. + +- Clone your fork of ``google-cloud-python`` from your GitHub account to your local + computer, substituting your account username and specifying the destination + as ``hack-on-google-cloud-python``. E.g.:: + + $ cd ${HOME} + $ git clone git@github.com:USERNAME/google-cloud-python.git hack-on-google-cloud-python + $ cd hack-on-google-cloud-python + # Configure remotes such that you can pull changes from the googleapis/google-cloud-python + # repository into your local repository. + $ git remote add upstream git@github.com:googleapis/google-cloud-python.git + # fetch and merge changes from upstream into main + $ git fetch upstream + $ git merge upstream/main + +Now your local repo is set up such that you will push changes to your GitHub +repo, from which you can submit a pull request. + +To work on the codebase and run the tests, we recommend using ``nox``, +but you can also use a ``virtualenv`` of your own creation. + +.. _repo: https://github.com/googleapis/google-cloud-python + +Using ``nox`` +============= + +We use `nox `__ to instrument our tests. 
+ +- To test your changes, run unit tests with ``nox``:: + $ nox -s unit + +- To run a single unit test:: + + $ nox -s unit-3.12 -- -k + + + .. note:: + + The unit tests and system tests are described in the + ``noxfile.py`` files in each directory. + +.. nox: https://pypi.org/project/nox/ + +***************************************** +I'm getting weird errors... Can you help? +***************************************** + +If the error mentions ``Python.h`` not being found, +install ``python-dev`` and try again. +On Debian/Ubuntu:: + + $ sudo apt-get install python-dev + +************ +Coding Style +************ +- We use the automatic code formatter ``black``. You can run it using + the nox session ``blacken``. This will eliminate many lint errors. Run via:: + + $ nox -s blacken + +- PEP8 compliance is required, with exceptions defined in the linter configuration. + If you have ``nox`` installed, you can test that you have not introduced + any non-compliant code via:: + + $ nox -s lint + +- In order to make ``nox -s lint`` run faster, you can set some environment + variables:: + + export GOOGLE_CLOUD_TESTING_REMOTE="upstream" + export GOOGLE_CLOUD_TESTING_BRANCH="main" + + By doing this, you are specifying the location of the most up-to-date + version of ``google-cloud-python``. The + remote name ``upstream`` should point to the official ``googleapis`` + checkout and the branch should be the default branch on that remote (``main``). + +- This repository contains configuration for the + `pre-commit `__ tool, which automates checking + our linters during a commit. If you have it installed on your ``$PATH``, + you can enable enforcing those checks via: + +.. code-block:: bash + + $ pre-commit install + pre-commit installed at .git/hooks/pre-commit + +Exceptions to PEP8: + +- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for + "Function-Under-Test"), which is PEP8-incompliant, but more readable. + Some also use a local variable, ``MUT`` (short for "Module-Under-Test"). + +******************** +Running System Tests +******************** + +- To run system tests, you can execute:: + + # Run all system tests + $ nox -s system + + # Run a single system test + $ nox -s system-3.12 -- -k + + + .. note:: + + System tests are only configured to run under Python 3.8, 3.9, 3.10, 3.11 and 3.12. + For expediency, we do not run them in older versions of Python 3. + + This alone will not run the tests. You'll need to change some local + auth settings and change some configuration in your project to + run all the tests. + +- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication `__. Some tests require a service account. For those tests see `Authenticating as a service account `__. + +************* +Test Coverage +************* + +- The codebase *must* have 100% test statement coverage after each commit. + You can test coverage via ``nox -s cover``. + +****************************************************** +Documentation Coverage and Building HTML Documentation +****************************************************** + +If you fix a bug, and the bug requires an API or behavior modification, all +documentation in this package which references that API or behavior must be +changed to reflect the bug fix, ideally in the same commit that fixes the bug +or adds the feature. 
+ +Build the docs via: + + $ nox -s docs + +************************* +Samples and code snippets +************************* + +Code samples and snippets live in the `samples/` catalogue. Feel free to +provide more examples, but make sure to write tests for those examples. +Each folder containing example code requires its own `noxfile.py` script +which automates testing. If you decide to create a new folder, you can +base it on the `samples/snippets` folder (providing `noxfile.py` and +the requirements files). + +The tests will run against a real Google Cloud Project, so you should +configure them just like the System Tests. + +- To run sample tests, you can execute:: + + # Run all tests in a folder + $ cd samples/snippets + $ nox -s py-3.8 + + # Run a single sample test + $ cd samples/snippets + $ nox -s py-3.8 -- -k + +******************************************** +Note About ``README`` as it pertains to PyPI +******************************************** + +The `description on PyPI`_ for the project comes directly from the +``README``. Due to the reStructuredText (``rst``) parser used by +PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst`` +instead of +``https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst``) +may cause problems creating links or rendering the description. + +.. _description on PyPI: https://pypi.org/project/google-shopping-css + + +************************* +Supported Python Versions +************************* + +We support: + +- `Python 3.7`_ +- `Python 3.8`_ +- `Python 3.9`_ +- `Python 3.10`_ +- `Python 3.11`_ +- `Python 3.12`_ + +.. _Python 3.7: https://docs.python.org/3.7/ +.. _Python 3.8: https://docs.python.org/3.8/ +.. _Python 3.9: https://docs.python.org/3.9/ +.. _Python 3.10: https://docs.python.org/3.10/ +.. _Python 3.11: https://docs.python.org/3.11/ +.. _Python 3.12: https://docs.python.org/3.12/ + + +Supported versions can be found in our ``noxfile.py`` `config`_. + +.. _config: https://github.com/googleapis/google-cloud-python/blob/main/packages/google-shopping-css/noxfile.py + + +********** +Versioning +********** + +This library follows `Semantic Versioning`_. + +.. _Semantic Versioning: http://semver.org/ + +Some packages are currently in major version zero (``0.y.z``), which means that +anything may change at any time and the public API should not be considered +stable. + +****************************** +Contributor License Agreements +****************************** + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the + intellectual property**, then you'll need to sign an + `individual CLA `__. +- **If you work for a company that wants to allow you to contribute your work**, + then you'll need to sign a + `corporate CLA `__. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. diff --git a/packages/google-shopping-css/LICENSE b/packages/google-shopping-css/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/packages/google-shopping-css/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/packages/google-shopping-css/MANIFEST.in b/packages/google-shopping-css/MANIFEST.in new file mode 100644 index 000000000000..e0a66705318e --- /dev/null +++ b/packages/google-shopping-css/MANIFEST.in @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +include README.rst LICENSE +recursive-include google *.json *.proto py.typed +recursive-include tests * +global-exclude *.py[co] +global-exclude __pycache__ + +# Exclude scripts for samples readmegen +prune scripts/readme-gen diff --git a/packages/google-shopping-css/README.rst b/packages/google-shopping-css/README.rst new file mode 100644 index 000000000000..72ebc30f4cdb --- /dev/null +++ b/packages/google-shopping-css/README.rst @@ -0,0 +1,108 @@ +Python Client for CSS API +========================= + +|preview| |pypi| |versions| + +`CSS API`_: Programmatically manage your Comparison Shopping Service (CSS) account data at scale. + +- `Client Library Documentation`_ +- `Product Documentation`_ + +.. |preview| image:: https://img.shields.io/badge/support-preview-orange.svg + :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#stability-levels +.. |pypi| image:: https://img.shields.io/pypi/v/google-shopping-css.svg + :target: https://pypi.org/project/google-shopping-css/ +.. |versions| image:: https://img.shields.io/pypi/pyversions/google-shopping-css.svg + :target: https://pypi.org/project/google-shopping-css/ +.. _CSS API: https://developers.google.com/comparison-shopping-services/api +.. _Client Library Documentation: https://googleapis.dev/python/google-shopping-css/latest +.. _Product Documentation: https://developers.google.com/comparison-shopping-services/api + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. `Enable the CSS API.`_ +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Enable the CSS API.: https://developers.google.com/comparison-shopping-services/api +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a virtual environment using `venv`_. `venv`_ is a tool that +creates isolated Python environments. These isolated environments can have separate +versions of Python packages, which allows you to isolate one project's dependencies +from the dependencies of other projects. + +With `venv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. 
_`venv`: https://docs.python.org/3/library/venv.html + + +Code samples and snippets +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Code samples and snippets live in the `samples/`_ folder. + +.. _samples/: https://github.com/googleapis/google-cloud-python/tree/main/packages/google-shopping-css/samples + + +Supported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^ +Our client libraries are compatible with all current `active`_ and `maintenance`_ versions of +Python. + +Python >= 3.7 + +.. _active: https://devguide.python.org/devcycle/#in-development-main-branch +.. _maintenance: https://devguide.python.org/devcycle/#maintenance-branches + +Unsupported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python <= 3.6 + +If you are using an `end-of-life`_ +version of Python, we recommend that you update as soon as possible to an actively supported version. + +.. _end-of-life: https://devguide.python.org/devcycle/#end-of-life-branches + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv + source /bin/activate + pip install google-shopping-css + + +Windows +^^^^^^^ + +.. code-block:: console + + py -m venv + .\\Scripts\activate + pip install google-shopping-css + +Next Steps +~~~~~~~~~~ + +- Read the `Client Library Documentation`_ for CSS API + to see other available methods on the client. +- Read the `CSS API Product documentation`_ to learn + more about the product and see How-to Guides. +- View this `README`_ to see the full list of Cloud + APIs that we cover. + +.. _CSS API Product documentation: https://developers.google.com/comparison-shopping-services/api +.. _README: https://github.com/googleapis/google-cloud-python/blob/main/README.rst diff --git a/packages/google-shopping-css/docs/CHANGELOG.md b/packages/google-shopping-css/docs/CHANGELOG.md new file mode 120000 index 000000000000..04c99a55caae --- /dev/null +++ b/packages/google-shopping-css/docs/CHANGELOG.md @@ -0,0 +1 @@ +../CHANGELOG.md \ No newline at end of file diff --git a/packages/google-shopping-css/docs/README.rst b/packages/google-shopping-css/docs/README.rst new file mode 120000 index 000000000000..89a0106941ff --- /dev/null +++ b/packages/google-shopping-css/docs/README.rst @@ -0,0 +1 @@ +../README.rst \ No newline at end of file diff --git a/packages/google-shopping-css/docs/_static/custom.css b/packages/google-shopping-css/docs/_static/custom.css new file mode 100644 index 000000000000..b0a295464b23 --- /dev/null +++ b/packages/google-shopping-css/docs/_static/custom.css @@ -0,0 +1,20 @@ +div#python2-eol { + border-color: red; + border-width: medium; +} + +/* Ensure minimum width for 'Parameters' / 'Returns' column */ +dl.field-list > dt { + min-width: 100px +} + +/* Insert space between methods for readability */ +dl.method { + padding-top: 10px; + padding-bottom: 10px +} + +/* Insert empty space between classes */ +dl.class { + padding-bottom: 50px +} diff --git a/packages/google-shopping-css/docs/_templates/layout.html b/packages/google-shopping-css/docs/_templates/layout.html new file mode 100644 index 000000000000..6316a537f72b --- /dev/null +++ b/packages/google-shopping-css/docs/_templates/layout.html @@ -0,0 +1,50 @@ + +{% extends "!layout.html" %} +{%- block content %} +{%- if theme_fixed_sidebar|lower == 'true' %} +
+ {{ sidebar() }} + {%- block document %} +
+ {%- if render_sidebar %} +
+ {%- endif %} + + {%- block relbar_top %} + {%- if theme_show_relbar_top|tobool %} + + {%- endif %} + {% endblock %} + +
+
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version. + Library versions released prior to that date will continue to be available. For more information please + visit Python 2 support on Google Cloud. +
+ {% block body %} {% endblock %} +
+ + {%- block relbar_bottom %} + {%- if theme_show_relbar_bottom|tobool %} + + {%- endif %} + {% endblock %} + + {%- if render_sidebar %} +
+ {%- endif %} +
+ {%- endblock %} +
+
+{%- else %} +{{ super() }} +{%- endif %} +{%- endblock %} diff --git a/packages/google-shopping-css/docs/conf.py b/packages/google-shopping-css/docs/conf.py new file mode 100644 index 000000000000..77f5f4ed2b4f --- /dev/null +++ b/packages/google-shopping-css/docs/conf.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# google-shopping-css documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import shlex +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +# For plugins that can not read conf.py. +# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 +sys.path.insert(0, os.path.abspath(".")) + +__version__ = "" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.5.5" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.doctest", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "recommonmark", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_options = {"members": True} +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = "google-shopping-css" +copyright = "2019, Google" +author = "Google APIs" + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. 
+# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [ + "_build", + "**/.nox/**/*", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/README.rst", +] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for google-shopping-css", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-shopping-css-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-shopping-css.tex", + "google-shopping-css Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. 
+# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-shopping-css", + "google-shopping-css Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-shopping-css", + "google-shopping-css Documentation", + author, + "google-shopping-css", + "google-shopping-css Library", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("https://python.readthedocs.org/en/latest/", None), + "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), + "google.api_core": ( + "https://googleapis.dev/python/google-api-core/latest/", + None, + ), + "grpc": ("https://grpc.github.io/grpc/python/", None), + "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/packages/google-shopping-css/docs/css_v1/account_labels_service.rst b/packages/google-shopping-css/docs/css_v1/account_labels_service.rst new file mode 100644 index 000000000000..8738d1d2b3e5 --- /dev/null +++ b/packages/google-shopping-css/docs/css_v1/account_labels_service.rst @@ -0,0 +1,10 @@ +AccountLabelsService +-------------------------------------- + +.. automodule:: google.shopping.css_v1.services.account_labels_service + :members: + :inherited-members: + +.. automodule:: google.shopping.css_v1.services.account_labels_service.pagers + :members: + :inherited-members: diff --git a/packages/google-shopping-css/docs/css_v1/accounts_service.rst b/packages/google-shopping-css/docs/css_v1/accounts_service.rst new file mode 100644 index 000000000000..c749ba01122c --- /dev/null +++ b/packages/google-shopping-css/docs/css_v1/accounts_service.rst @@ -0,0 +1,10 @@ +AccountsService +--------------------------------- + +.. automodule:: google.shopping.css_v1.services.accounts_service + :members: + :inherited-members: + +.. 
automodule:: google.shopping.css_v1.services.accounts_service.pagers + :members: + :inherited-members: diff --git a/packages/google-shopping-css/docs/css_v1/css_product_inputs_service.rst b/packages/google-shopping-css/docs/css_v1/css_product_inputs_service.rst new file mode 100644 index 000000000000..628fe0c9c05b --- /dev/null +++ b/packages/google-shopping-css/docs/css_v1/css_product_inputs_service.rst @@ -0,0 +1,6 @@ +CssProductInputsService +----------------------------------------- + +.. automodule:: google.shopping.css_v1.services.css_product_inputs_service + :members: + :inherited-members: diff --git a/packages/google-shopping-css/docs/css_v1/css_products_service.rst b/packages/google-shopping-css/docs/css_v1/css_products_service.rst new file mode 100644 index 000000000000..53df112bb801 --- /dev/null +++ b/packages/google-shopping-css/docs/css_v1/css_products_service.rst @@ -0,0 +1,10 @@ +CssProductsService +------------------------------------ + +.. automodule:: google.shopping.css_v1.services.css_products_service + :members: + :inherited-members: + +.. automodule:: google.shopping.css_v1.services.css_products_service.pagers + :members: + :inherited-members: diff --git a/packages/google-shopping-css/docs/css_v1/services_.rst b/packages/google-shopping-css/docs/css_v1/services_.rst new file mode 100644 index 000000000000..496beecd39dc --- /dev/null +++ b/packages/google-shopping-css/docs/css_v1/services_.rst @@ -0,0 +1,9 @@ +Services for Google Shopping Css v1 API +======================================= +.. toctree:: + :maxdepth: 2 + + account_labels_service + accounts_service + css_product_inputs_service + css_products_service diff --git a/packages/google-shopping-css/docs/css_v1/types_.rst b/packages/google-shopping-css/docs/css_v1/types_.rst new file mode 100644 index 000000000000..165888559c88 --- /dev/null +++ b/packages/google-shopping-css/docs/css_v1/types_.rst @@ -0,0 +1,6 @@ +Types for Google Shopping Css v1 API +==================================== + +.. automodule:: google.shopping.css_v1.types + :members: + :show-inheritance: diff --git a/packages/google-shopping-css/docs/index.rst b/packages/google-shopping-css/docs/index.rst new file mode 100644 index 000000000000..fd080362a227 --- /dev/null +++ b/packages/google-shopping-css/docs/index.rst @@ -0,0 +1,23 @@ +.. include:: README.rst + +.. include:: multiprocessing.rst + + +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + css_v1/services_ + css_v1/types_ + + +Changelog +--------- + +For a list of all ``google-shopping-css`` releases: + +.. toctree:: + :maxdepth: 2 + + CHANGELOG diff --git a/packages/google-shopping-css/docs/multiprocessing.rst b/packages/google-shopping-css/docs/multiprocessing.rst new file mode 100644 index 000000000000..536d17b2ea65 --- /dev/null +++ b/packages/google-shopping-css/docs/multiprocessing.rst @@ -0,0 +1,7 @@ +.. note:: + + Because this client uses :mod:`grpc` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.pool.Pool` or + :class:`multiprocessing.Process`. 
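
As an illustrative sketch (hand-written, not part of the generated code in this patch), the new ``AccountLabelsService`` surface documented above can be exercised roughly as follows; the account ID is a hypothetical placeholder, and real calls require valid credentials for an existing CSS account.

.. code-block:: python

    # Minimal usage sketch for the new google-shopping-css client.
    # Assumes application-default credentials and an existing CSS account.
    from google.shopping import css_v1


    def list_labels(account_id: str) -> None:
        # Synchronous client; an AccountLabelsServiceAsyncClient also exists.
        client = css_v1.AccountLabelsServiceClient()

        # Parent format per the service docs: accounts/{account}
        request = css_v1.ListAccountLabelsRequest(
            parent=f"accounts/{account_id}",
        )

        # list_account_labels returns a pager that resolves pages lazily.
        for label in client.list_account_labels(request=request):
            print(label.name)


    # Example call with a hypothetical account ID:
    # list_labels("123")
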
diff --git a/packages/google-shopping-css/google/shopping/css/__init__.py b/packages/google-shopping-css/google/shopping/css/__init__.py new file mode 100644 index 000000000000..793e6d80b6dd --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css/__init__.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.shopping.css import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.shopping.css_v1.services.account_labels_service.async_client import ( + AccountLabelsServiceAsyncClient, +) +from google.shopping.css_v1.services.account_labels_service.client import ( + AccountLabelsServiceClient, +) +from google.shopping.css_v1.services.accounts_service.async_client import ( + AccountsServiceAsyncClient, +) +from google.shopping.css_v1.services.accounts_service.client import ( + AccountsServiceClient, +) +from google.shopping.css_v1.services.css_product_inputs_service.async_client import ( + CssProductInputsServiceAsyncClient, +) +from google.shopping.css_v1.services.css_product_inputs_service.client import ( + CssProductInputsServiceClient, +) +from google.shopping.css_v1.services.css_products_service.async_client import ( + CssProductsServiceAsyncClient, +) +from google.shopping.css_v1.services.css_products_service.client import ( + CssProductsServiceClient, +) +from google.shopping.css_v1.types.accounts import ( + Account, + GetAccountRequest, + ListChildAccountsRequest, + ListChildAccountsResponse, + UpdateAccountLabelsRequest, +) +from google.shopping.css_v1.types.accounts_labels import ( + AccountLabel, + CreateAccountLabelRequest, + DeleteAccountLabelRequest, + ListAccountLabelsRequest, + ListAccountLabelsResponse, + UpdateAccountLabelRequest, +) +from google.shopping.css_v1.types.css_product_common import ( + Attributes, + Certification, + CssProductStatus, + ProductDetail, + ProductDimension, + ProductWeight, +) +from google.shopping.css_v1.types.css_product_inputs import ( + CssProductInput, + DeleteCssProductInputRequest, + InsertCssProductInputRequest, +) +from google.shopping.css_v1.types.css_products import ( + CssProduct, + GetCssProductRequest, + ListCssProductsRequest, + ListCssProductsResponse, +) + +__all__ = ( + "AccountLabelsServiceClient", + "AccountLabelsServiceAsyncClient", + "AccountsServiceClient", + "AccountsServiceAsyncClient", + "CssProductInputsServiceClient", + "CssProductInputsServiceAsyncClient", + "CssProductsServiceClient", + "CssProductsServiceAsyncClient", + "Account", + "GetAccountRequest", + "ListChildAccountsRequest", + "ListChildAccountsResponse", + "UpdateAccountLabelsRequest", + "AccountLabel", + "CreateAccountLabelRequest", + "DeleteAccountLabelRequest", + "ListAccountLabelsRequest", + "ListAccountLabelsResponse", + "UpdateAccountLabelRequest", + "Attributes", + "Certification", + "CssProductStatus", + "ProductDetail", + "ProductDimension", + "ProductWeight", + "CssProductInput", + "DeleteCssProductInputRequest", + 
"InsertCssProductInputRequest", + "CssProduct", + "GetCssProductRequest", + "ListCssProductsRequest", + "ListCssProductsResponse", +) diff --git a/packages/google-shopping-css/google/shopping/css/gapic_version.py b/packages/google-shopping-css/google/shopping/css/gapic_version.py new file mode 100644 index 000000000000..360a0d13ebdd --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-shopping-css/google/shopping/css/py.typed b/packages/google-shopping-css/google/shopping/css/py.typed new file mode 100644 index 000000000000..29c7b3690029 --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-shopping-css package uses inline types. diff --git a/packages/google-shopping-css/google/shopping/css_v1/__init__.py b/packages/google-shopping-css/google/shopping/css_v1/__init__.py new file mode 100644 index 000000000000..e5e2f014ca9f --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/__init__.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.shopping.css_v1 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.account_labels_service import ( + AccountLabelsServiceAsyncClient, + AccountLabelsServiceClient, +) +from .services.accounts_service import AccountsServiceAsyncClient, AccountsServiceClient +from .services.css_product_inputs_service import ( + CssProductInputsServiceAsyncClient, + CssProductInputsServiceClient, +) +from .services.css_products_service import ( + CssProductsServiceAsyncClient, + CssProductsServiceClient, +) +from .types.accounts import ( + Account, + GetAccountRequest, + ListChildAccountsRequest, + ListChildAccountsResponse, + UpdateAccountLabelsRequest, +) +from .types.accounts_labels import ( + AccountLabel, + CreateAccountLabelRequest, + DeleteAccountLabelRequest, + ListAccountLabelsRequest, + ListAccountLabelsResponse, + UpdateAccountLabelRequest, +) +from .types.css_product_common import ( + Attributes, + Certification, + CssProductStatus, + ProductDetail, + ProductDimension, + ProductWeight, +) +from .types.css_product_inputs import ( + CssProductInput, + DeleteCssProductInputRequest, + InsertCssProductInputRequest, +) +from .types.css_products import ( + CssProduct, + GetCssProductRequest, + ListCssProductsRequest, + ListCssProductsResponse, +) + +__all__ = ( + "AccountLabelsServiceAsyncClient", + "AccountsServiceAsyncClient", + "CssProductInputsServiceAsyncClient", + "CssProductsServiceAsyncClient", + "Account", + "AccountLabel", + "AccountLabelsServiceClient", + "AccountsServiceClient", + "Attributes", + "Certification", + "CreateAccountLabelRequest", + "CssProduct", + "CssProductInput", + "CssProductInputsServiceClient", + "CssProductStatus", + "CssProductsServiceClient", + "DeleteAccountLabelRequest", + "DeleteCssProductInputRequest", + "GetAccountRequest", + "GetCssProductRequest", + "InsertCssProductInputRequest", + "ListAccountLabelsRequest", + "ListAccountLabelsResponse", + "ListChildAccountsRequest", + "ListChildAccountsResponse", + "ListCssProductsRequest", + "ListCssProductsResponse", + "ProductDetail", + "ProductDimension", + "ProductWeight", + "UpdateAccountLabelRequest", + "UpdateAccountLabelsRequest", +) diff --git a/packages/google-shopping-css/google/shopping/css_v1/gapic_metadata.json b/packages/google-shopping-css/google/shopping/css_v1/gapic_metadata.json new file mode 100644 index 000000000000..f67d8636033b --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/gapic_metadata.json @@ -0,0 +1,250 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.shopping.css_v1", + "protoPackage": "google.shopping.css.v1", + "schema": "1.0", + "services": { + "AccountLabelsService": { + "clients": { + "grpc": { + "libraryClient": "AccountLabelsServiceClient", + "rpcs": { + "CreateAccountLabel": { + "methods": [ + "create_account_label" + ] + }, + "DeleteAccountLabel": { + "methods": [ + "delete_account_label" + ] + }, + "ListAccountLabels": { + "methods": [ + "list_account_labels" + ] + }, + "UpdateAccountLabel": { + "methods": [ + "update_account_label" + ] + } + } + }, + "grpc-async": { + "libraryClient": "AccountLabelsServiceAsyncClient", + "rpcs": { + "CreateAccountLabel": { + "methods": [ + "create_account_label" + ] + }, + "DeleteAccountLabel": { + "methods": [ + "delete_account_label" + ] + }, + "ListAccountLabels": { + "methods": [ + "list_account_labels" + ] + }, + "UpdateAccountLabel": { + "methods": 
[ + "update_account_label" + ] + } + } + }, + "rest": { + "libraryClient": "AccountLabelsServiceClient", + "rpcs": { + "CreateAccountLabel": { + "methods": [ + "create_account_label" + ] + }, + "DeleteAccountLabel": { + "methods": [ + "delete_account_label" + ] + }, + "ListAccountLabels": { + "methods": [ + "list_account_labels" + ] + }, + "UpdateAccountLabel": { + "methods": [ + "update_account_label" + ] + } + } + } + } + }, + "AccountsService": { + "clients": { + "grpc": { + "libraryClient": "AccountsServiceClient", + "rpcs": { + "GetAccount": { + "methods": [ + "get_account" + ] + }, + "ListChildAccounts": { + "methods": [ + "list_child_accounts" + ] + }, + "UpdateLabels": { + "methods": [ + "update_labels" + ] + } + } + }, + "grpc-async": { + "libraryClient": "AccountsServiceAsyncClient", + "rpcs": { + "GetAccount": { + "methods": [ + "get_account" + ] + }, + "ListChildAccounts": { + "methods": [ + "list_child_accounts" + ] + }, + "UpdateLabels": { + "methods": [ + "update_labels" + ] + } + } + }, + "rest": { + "libraryClient": "AccountsServiceClient", + "rpcs": { + "GetAccount": { + "methods": [ + "get_account" + ] + }, + "ListChildAccounts": { + "methods": [ + "list_child_accounts" + ] + }, + "UpdateLabels": { + "methods": [ + "update_labels" + ] + } + } + } + } + }, + "CssProductInputsService": { + "clients": { + "grpc": { + "libraryClient": "CssProductInputsServiceClient", + "rpcs": { + "DeleteCssProductInput": { + "methods": [ + "delete_css_product_input" + ] + }, + "InsertCssProductInput": { + "methods": [ + "insert_css_product_input" + ] + } + } + }, + "grpc-async": { + "libraryClient": "CssProductInputsServiceAsyncClient", + "rpcs": { + "DeleteCssProductInput": { + "methods": [ + "delete_css_product_input" + ] + }, + "InsertCssProductInput": { + "methods": [ + "insert_css_product_input" + ] + } + } + }, + "rest": { + "libraryClient": "CssProductInputsServiceClient", + "rpcs": { + "DeleteCssProductInput": { + "methods": [ + "delete_css_product_input" + ] + }, + "InsertCssProductInput": { + "methods": [ + "insert_css_product_input" + ] + } + } + } + } + }, + "CssProductsService": { + "clients": { + "grpc": { + "libraryClient": "CssProductsServiceClient", + "rpcs": { + "GetCssProduct": { + "methods": [ + "get_css_product" + ] + }, + "ListCssProducts": { + "methods": [ + "list_css_products" + ] + } + } + }, + "grpc-async": { + "libraryClient": "CssProductsServiceAsyncClient", + "rpcs": { + "GetCssProduct": { + "methods": [ + "get_css_product" + ] + }, + "ListCssProducts": { + "methods": [ + "list_css_products" + ] + } + } + }, + "rest": { + "libraryClient": "CssProductsServiceClient", + "rpcs": { + "GetCssProduct": { + "methods": [ + "get_css_product" + ] + }, + "ListCssProducts": { + "methods": [ + "list_css_products" + ] + } + } + } + } + } + } +} diff --git a/packages/google-shopping-css/google/shopping/css_v1/gapic_version.py b/packages/google-shopping-css/google/shopping/css_v1/gapic_version.py new file mode 100644 index 000000000000..360a0d13ebdd --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/packages/google-shopping-css/google/shopping/css_v1/py.typed b/packages/google-shopping-css/google/shopping/css_v1/py.typed new file mode 100644 index 000000000000..29c7b3690029 --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-shopping-css package uses inline types. diff --git a/packages/google-shopping-css/google/shopping/css_v1/services/__init__.py b/packages/google-shopping-css/google/shopping/css_v1/services/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/__init__.py b/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/__init__.py new file mode 100644 index 000000000000..450eca085f59 --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .async_client import AccountLabelsServiceAsyncClient +from .client import AccountLabelsServiceClient + +__all__ = ( + "AccountLabelsServiceClient", + "AccountLabelsServiceAsyncClient", +) diff --git a/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/async_client.py b/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/async_client.py new file mode 100644 index 000000000000..52ddd0874fb1 --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/async_client.py @@ -0,0 +1,664 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.api_core.client_options import ClientOptions +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.shopping.css_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.shopping.css_v1.services.account_labels_service import pagers +from google.shopping.css_v1.types import accounts_labels + +from .client import AccountLabelsServiceClient +from .transports.base import DEFAULT_CLIENT_INFO, AccountLabelsServiceTransport +from .transports.grpc_asyncio import AccountLabelsServiceGrpcAsyncIOTransport + + +class AccountLabelsServiceAsyncClient: + """Manages Merchant Center and CSS accounts labels.""" + + _client: AccountLabelsServiceClient + + DEFAULT_ENDPOINT = AccountLabelsServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AccountLabelsServiceClient.DEFAULT_MTLS_ENDPOINT + + account_label_path = staticmethod(AccountLabelsServiceClient.account_label_path) + parse_account_label_path = staticmethod( + AccountLabelsServiceClient.parse_account_label_path + ) + common_billing_account_path = staticmethod( + AccountLabelsServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + AccountLabelsServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(AccountLabelsServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + AccountLabelsServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + AccountLabelsServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + AccountLabelsServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(AccountLabelsServiceClient.common_project_path) + 
parse_common_project_path = staticmethod( + AccountLabelsServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(AccountLabelsServiceClient.common_location_path) + parse_common_location_path = staticmethod( + AccountLabelsServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AccountLabelsServiceAsyncClient: The constructed client. + """ + return AccountLabelsServiceClient.from_service_account_info.__func__(AccountLabelsServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AccountLabelsServiceAsyncClient: The constructed client. + """ + return AccountLabelsServiceClient.from_service_account_file.__func__(AccountLabelsServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return AccountLabelsServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> AccountLabelsServiceTransport: + """Returns the transport used by the client instance. + + Returns: + AccountLabelsServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(AccountLabelsServiceClient).get_transport_class, + type(AccountLabelsServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AccountLabelsServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the account labels service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AccountLabelsServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = AccountLabelsServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_account_labels( + self, + request: Optional[Union[accounts_labels.ListAccountLabelsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAccountLabelsAsyncPager: + r"""Lists the labels assigned to an account. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.shopping import css_v1 + + async def sample_list_account_labels(): + # Create a client + client = css_v1.AccountLabelsServiceAsyncClient() + + # Initialize request argument(s) + request = css_v1.ListAccountLabelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_account_labels(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.shopping.css_v1.types.ListAccountLabelsRequest, dict]]): + The request object. Request message for the ``ListAccountLabels`` method. + parent (:class:`str`): + Required. The parent account. + Format: accounts/{account} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.shopping.css_v1.services.account_labels_service.pagers.ListAccountLabelsAsyncPager: + Response message for the ListAccountLabels method. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = accounts_labels.ListAccountLabelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_account_labels, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAccountLabelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def create_account_label( + self, + request: Optional[ + Union[accounts_labels.CreateAccountLabelRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + account_label: Optional[accounts_labels.AccountLabel] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> accounts_labels.AccountLabel: + r"""Creates a new label, not assigned to any account. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.shopping import css_v1 + + async def sample_create_account_label(): + # Create a client + client = css_v1.AccountLabelsServiceAsyncClient() + + # Initialize request argument(s) + request = css_v1.CreateAccountLabelRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_account_label(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.shopping.css_v1.types.CreateAccountLabelRequest, dict]]): + The request object. Request message for the + 'CreateAccountLanel' method. + parent (:class:`str`): + Required. The parent account. + Format: accounts/{account} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + account_label (:class:`google.shopping.css_v1.types.AccountLabel`): + Required. The label to create. + This corresponds to the ``account_label`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.shopping.css_v1.types.AccountLabel: + Label assigned by CSS domain or CSS + group to one of its sub-accounts. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, account_label]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = accounts_labels.CreateAccountLabelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if account_label is not None: + request.account_label = account_label + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_account_label, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_account_label( + self, + request: Optional[ + Union[accounts_labels.UpdateAccountLabelRequest, dict] + ] = None, + *, + account_label: Optional[accounts_labels.AccountLabel] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> accounts_labels.AccountLabel: + r"""Updates a label. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.shopping import css_v1 + + async def sample_update_account_label(): + # Create a client + client = css_v1.AccountLabelsServiceAsyncClient() + + # Initialize request argument(s) + request = css_v1.UpdateAccountLabelRequest( + ) + + # Make the request + response = await client.update_account_label(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.shopping.css_v1.types.UpdateAccountLabelRequest, dict]]): + The request object. Request message for the ``UpdateAccountLabel`` method. + account_label (:class:`google.shopping.css_v1.types.AccountLabel`): + Required. The updated label. All + fields must be provided. + + This corresponds to the ``account_label`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.shopping.css_v1.types.AccountLabel: + Label assigned by CSS domain or CSS + group to one of its sub-accounts. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([account_label]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = accounts_labels.UpdateAccountLabelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if account_label is not None: + request.account_label = account_label + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_account_label, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("account_label.name", request.account_label.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_account_label( + self, + request: Optional[ + Union[accounts_labels.DeleteAccountLabelRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a label and removes it from all accounts to + which it was assigned. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.shopping import css_v1 + + async def sample_delete_account_label(): + # Create a client + client = css_v1.AccountLabelsServiceAsyncClient() + + # Initialize request argument(s) + request = css_v1.DeleteAccountLabelRequest( + name="name_value", + ) + + # Make the request + await client.delete_account_label(request=request) + + Args: + request (Optional[Union[google.shopping.css_v1.types.DeleteAccountLabelRequest, dict]]): + The request object. Request message for the + 'DeleteAccountLabel' method. + name (:class:`str`): + Required. The name of the label to + delete. Format: + accounts/{account}/labels/{label} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = accounts_labels.DeleteAccountLabelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_account_label, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def __aenter__(self) -> "AccountLabelsServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("AccountLabelsServiceAsyncClient",) diff --git a/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/client.py b/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/client.py new file mode 100644 index 000000000000..1d3050f21cab --- /dev/null +++ b/packages/google-shopping-css/google/shopping/css_v1/services/account_labels_service/client.py @@ -0,0 +1,883 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.shopping.css_v1 import gapic_version as package_version + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.shopping.css_v1.services.account_labels_service import pagers +from google.shopping.css_v1.types import accounts_labels + +from .transports.base import DEFAULT_CLIENT_INFO, AccountLabelsServiceTransport +from .transports.grpc import AccountLabelsServiceGrpcTransport +from .transports.grpc_asyncio import AccountLabelsServiceGrpcAsyncIOTransport +from .transports.rest import AccountLabelsServiceRestTransport + + +class AccountLabelsServiceClientMeta(type): + """Metaclass for the AccountLabelsService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[AccountLabelsServiceTransport]] + _transport_registry["grpc"] = AccountLabelsServiceGrpcTransport + _transport_registry["grpc_asyncio"] = AccountLabelsServiceGrpcAsyncIOTransport + _transport_registry["rest"] = AccountLabelsServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[AccountLabelsServiceTransport]: + """Returns an appropriate transport class. 
+ + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AccountLabelsServiceClient(metaclass=AccountLabelsServiceClientMeta): + """Manages Merchant Center and CSS accounts labels.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "css.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AccountLabelsServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AccountLabelsServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AccountLabelsServiceTransport: + """Returns the transport used by the client instance. + + Returns: + AccountLabelsServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def account_label_path( + account: str, + label: str, + ) -> str: + """Returns a fully-qualified account_label string.""" + return "accounts/{account}/labels/{label}".format( + account=account, + label=label, + ) + + @staticmethod + def parse_account_label_path(path: str) -> Dict[str, str]: + """Parses a account_label path into its component segments.""" + m = re.match(r"^accounts/(?P<account>.+?)/labels/(?P<label>